| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import h5py
import numpy
from rdkit import Chem
from steps.featuregeneration.shared import substructure_feature_generator
from util import data_validation, misc, file_structure, file_util, logger, process_pool, constants, \
hdf5_util, multi_process_progressbar
class SaliencyMapSubstructureFeatureGeneration:
@staticmethod
def get_id():
return 'saliency_map_substructure_feature_generation'
@staticmethod
def get_name():
return 'Saliency Map Substructures'
@staticmethod
def get_parameters():
parameters = list()
parameters.append({'id': 'top_n', 'name': 'Top n', 'type': int, 'default': None, 'min': 1,
'description': 'The number of substructures that will be considered. Default: All'})
parameters.append({'id': 'min_score', 'name': 'Minimum score', 'type': float, 'default': None, 'min': 0,
'max': 1,
'description': 'The minimum score of substructures that will be considered. Default: All'})
parameters.append({'id': 'active', 'name': 'Active class', 'type': bool, 'default': True,
'description': 'Use substructures for the active class (True) or inactive class (False).'
' Default: Active'})
parameters.append({'id': 'count', 'name': 'Use counts', 'type': bool, 'default': True,
'description': 'Use counts instead of bits. Default: True'})
return parameters
@staticmethod
def check_prerequisites(global_parameters, local_parameters):
data_validation.validate_data_set(global_parameters)
@staticmethod
def get_result_file(global_parameters, local_parameters):
file_name = 'saliency_map_features.h5'
return file_util.resolve_subpath(file_structure.get_result_folder(global_parameters), file_name)
@staticmethod
def execute(global_parameters, local_parameters):
global_parameters[constants.GlobalParameters.feature_id] = 'saliency_map_substructures'
features_path = SaliencyMapSubstructureFeatureGeneration.get_result_file(global_parameters, local_parameters)
if file_util.file_exists(features_path):
logger.log('Skipping step: ' + features_path + ' already exists')
features_h5 = h5py.File(features_path, 'r')
feature_dimensions = features_h5[file_structure.Preprocessed.preprocessed].shape[1]
features_h5.close()
else:
saliency_map_substructures_path = global_parameters[constants.GlobalParameters.saliency_map_substructures_data]
substructures = load_substructures(saliency_map_substructures_path, local_parameters['top_n'],
local_parameters['min_score'], local_parameters['active'])
feature_dimensions = len(substructures)
data_h5 = h5py.File(file_structure.get_data_set_file(global_parameters), 'r')
smiles_data = data_h5[file_structure.DataSet.smiles][:]
data_h5.close()
temp_features_path = file_util.get_temporary_file_path('saliency_map_features')
chunks = misc.chunk(len(smiles_data), process_pool.default_number_processes)
global_parameters[constants.GlobalParameters.input_dimensions] = (len(substructures),)
logger.log('Calculating saliency map features')
with process_pool.ProcessPool(len(chunks)) as pool:
with multi_process_progressbar.MultiProcessProgressbar(len(smiles_data), value_buffer=100) as progress:
for chunk in chunks:
pool.submit(substructure_feature_generator.generate_substructure_features,
smiles_data[chunk['start']:chunk['end']], substructures,
progress=progress.get_slave())
results = pool.get_results()
if local_parameters['count']:
dtype = 'uint16'
else:
dtype = 'uint8'
features_h5 = h5py.File(temp_features_path, 'w')
features = hdf5_util.create_dataset(features_h5, file_structure.Preprocessed.preprocessed,
(len(smiles_data), len(substructures)), dtype=dtype,
chunks=(1, len(substructures)))
offset = 0
for result in results:
if local_parameters['count']:
features[offset:offset + len(result)] = result[:]
else:
features[offset:offset + len(result)] = result[:] > 0
offset += len(result)
features_h5.close()
file_util.move_file(temp_features_path, features_path)
global_parameters[constants.GlobalParameters.input_dimensions] = feature_dimensions
global_parameters[constants.GlobalParameters.preprocessed_data] = features_path
global_parameters[constants.GlobalParameters.feature_files].append(features_path)
def load_substructures(saliency_map_substructures_path, top_n, min_score, active):
if active:
score_data_set = file_structure.SaliencyMapSubstructures.active_substructures_score
smiles_data_set = file_structure.SaliencyMapSubstructures.active_substructures
else:
score_data_set = file_structure.SaliencyMapSubstructures.inactive_substructures_score
smiles_data_set = file_structure.SaliencyMapSubstructures.inactive_substructures
saliency_map_substructures_h5 = h5py.File(saliency_map_substructures_path, 'r')
number_substructures = len(saliency_map_substructures_h5[smiles_data_set])
if top_n is not None:
number_substructures = min(number_substructures, top_n)
if min_score is not None:
score = saliency_map_substructures_h5[score_data_set][:]
number_substructures = min(number_substructures, numpy.sum(score >= min_score))
smiles = saliency_map_substructures_h5[smiles_data_set][:number_substructures]
saliency_map_substructures_h5.close()
substructures = list()
for i in range(len(smiles)):
substructures.append(Chem.MolFromSmiles(smiles[i].decode('UTF-8'), sanitize=False))
return substructures
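# Note: the top_n / min_score filtering in load_substructures assumes the score and
# SMILES datasets in the HDF5 file are stored sorted by descending score; only then
# does taking the first number_substructures entries yield the best-scoring substructures.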
| patrick-winter-knime/mol-struct-nets | molstructnets/steps/featuregeneration/saliencymapsubstructurefeaturegeneration/saliency_map_substructure_feature_generation.py | Python | gpl-3.0 | 6,334 | ["RDKit"] | 5440d74ecbc43b7ebdd9b6ca5fb2c40aceef9965530a03b4181aacf3df097db5 |
""" RemoveNestedFunctions turns nested function into top-level functions. """
from pythran.analyses import GlobalDeclarations, ImportedIds
from pythran.passmanager import Transformation
from pythran.tables import MODULES
import ast
class _NestedFunctionRemover(Transformation):
def __init__(self, pm, ctx):
Transformation.__init__(self)
self.ctx = ctx
self.passmanager = pm
self.global_declarations = pm.gather(GlobalDeclarations, ctx.module)
def visit_FunctionDef(self, node):
if MODULES['functools'] not in self.global_declarations.values():
import_ = ast.Import([ast.alias('functools', None)])
self.ctx.module.body.insert(0, import_)
self.global_declarations['functools'] = MODULES['functools']
self.ctx.module.body.append(node)
former_name = node.name
new_name = "pythran_{0}".format(former_name)
ii = self.passmanager.gather(ImportedIds, node, self.ctx)
binded_args = [ast.Name(iin, ast.Load()) for iin in sorted(ii)]
node.args.args = ([ast.Name(iin, ast.Param()) for iin in sorted(ii)] +
node.args.args)
class Renamer(ast.NodeTransformer):
def visit_Call(self, node):
self.generic_visit(node)
if (isinstance(node.func, ast.Name) and
node.func.id == former_name):
node.func.id = new_name
node.args = (
[ast.Name(iin, ast.Load()) for iin in sorted(ii)] +
node.args
)
return node
Renamer().visit(node)
node.name = new_name
proxy_call = ast.Name(new_name, ast.Load())
new_node = ast.Assign(
[ast.Name(former_name, ast.Store())],
ast.Call(
ast.Attribute(
ast.Name('functools', ast.Load()),
"partial",
ast.Load()
),
[proxy_call] + binded_args,
[],
None,
None
)
)
self.generic_visit(node)
return new_node
class RemoveNestedFunctions(Transformation):
"""
Replace nested functions by top-level functions.
Also add a call to a bind intrinsic that
generates a local function with some arguments bound.
>>> import ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(x):\\n def bar(y): return x+y\\n bar(12)")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(RemoveNestedFunctions, node)
>>> print pm.dump(backend.Python, node)
import functools
def foo(x):
bar = functools.partial(pythran_bar, x)
bar(12)
def pythran_bar(x, y):
return (x + y)
"""
def visit_Module(self, node):
map(self.visit, node.body)
return node
def visit_FunctionDef(self, node):
nfr = _NestedFunctionRemover(self.passmanager, self.ctx)
node.body = map(nfr.visit, node.body)
return node
| artas360/pythran | pythran/transformations/remove_nested_functions.py | Python | bsd-3-clause | 3,162 | ["VisIt"] | f5afa3a583b492e25cd795691eb916d8bf20600b6108f5329b28051121820e95 |
#!/usr/bin/env python
'''
This is an implementation of the Metropolis-Hastings algorithm.
It is used for Bayesian sampling from a distribution that is typically multidimensional and cannot be integrated numerically.
It utilizes Markov chains, which are sequences of random variables.
The Markov chain wanders around, only remembering the state of the previous iteration.
As the number of samples grows, the distribution of the chain converges to the posterior distribution.
Usage:
Modify the posterior and proposal distribution functions in mh.py to suit your statistical model.
references:
"Pattern Recognition and Machine Learning" by Christopher Bishop
"Information Theory, Inference, and Learning Algorithms" by David Mackay
"Machine Learning: An Algorithmic Perspective" by Stephen Marsland
'''
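# For reference, the acceptance rule implemented in MH.alpha() below is the standard
# Metropolis-Hastings ratio for a candidate x' proposed from q given the current state x:
#   alpha(x', x) = min(1, [p(x') * q(x | x')] / [p(x) * q(x' | x)])
# With a symmetric proposal (the Gaussian random walk used for method="random_walk"),
# the q terms cancel and this reduces to the plain Metropolis rule min(1, p(x')/p(x)).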
import numpy as np
from pylab import *
import random
class MH():
def __init__(self, p, q, samples, method):
self.samples = samples # integer number of samples to do, typically > 5,000
self.method = method # independent or random_walk
self.chain = np.zeros(samples) # initialize list of samples to 0
self.p = p # posterior distribution
self.q = q # proposal distribution
def alpha(self,candidate,current):
if self.method=="random_walk":
# Gaussian distribution is symmetric, so equation simplifies to just the Metropolis algorithm
return min(1, self.p(candidate)/self.p(current))
else:
return min(1, (self.p(candidate)*self.q(current))/(self.p(current)*self.q(candidate)))
def generate_candidate(self,mu,sigma,current=None):
# randomly generate a candidate value from the proposal distribution
if self.method=="independent":
candidate = random.normalvariate(mu,sigma) # proposed move
elif self.method=="random_walk":
candidate = current + random.normalvariate(mu,sigma) # proposed move relative to the current state
return candidate
def sample(self,mu,sigma,burn_in=250):
self.chain[0] = random.normalvariate(mu,sigma) # initial value
u = np.random.uniform(0.0, 1.0, self.samples) # array of uniform random variables (between 0 and 1)
for i in xrange(0,self.samples-1):
candidate = self.generate_candidate(mu,sigma,self.chain[i])
# accept/reject scheme
if u[i]<self.alpha(candidate,self.chain[i]):
# accept the move
self.chain[i+1] = candidate
else:
# reject the move
self.chain[i+1] = self.chain[i]
self.chain = self.chain[burn_in:self.samples] # discard the first burn_in samples to prevent influence of the starting distribution
def plot_results(self):
# create histogram for distribution
figure(1)
hist(self.chain, bins = 30) # histogram
ylabel('Frequency')
xlabel('Value')
title('Histogram of Samples')
# create trace plot of Markov values over all iterations
figure(2)
plot(self.chain)
ylabel('Values')
xlabel('Iteration #')
title('Trace Plot of Markov Values')
show()
def single_sample(self):
return self.chain[random.randrange(0,len(self.chain))]
if __name__ == '__main__':
def PosteriorDistribution(x):
# defines the target (posterior) density that we want to sample from
# let's use a bimodal distribution to represent a non-symmetric distribution
# another example could be a mixture of two normal distributions
mu1 = 3 # mean1
mu2 = 10 # mean2
v1 = 10 # variance1
v2 = 3 # variance2
return 0.3*exp(-(x-mu1)**2/v1) + 0.7* exp(-(x-mu2)**2/v2)
def ProposalDistribution(x):
# one option is exp(-x**2/2)/sqrt(2*pi) # standard normal PDF
# should be tuned to the posterior distribution
# specify the hyperparameters (mean and variance)
return exp(-(x-5)**2/(10**2)) # 5 = mu, 10 = sigma
model = MH(PosteriorDistribution,ProposalDistribution,10000,"independent") # last 2 args are # samples and method
model.sample(5,10) # mu, sigma, burn-in. for method="random_walk", set mu=0
print 'A sample from the PDF is: ' + str(model.single_sample())
model.plot_results()
| mlskit/astromlskit | MCMC/mh.py | Python | gpl-3.0 | 4,354 | ["Gaussian"] | 6e505aa705c66246de72e082ffe8000de637a8fb3ef11738e712965fd08f8ed6 |
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from scipy.stats import multivariate_normal
from pgmpy.factors.continuous import ContinuousFactor
class JointGaussianDistribution(ContinuousFactor):
u"""
In its most common representation, a multivariate Gaussian distribution
over X1, ..., Xn is characterized by an n-dimensional mean vector μ,
and a symmetric n x n covariance matrix Σ.
This is the base class for its representation.
"""
def __init__(self, variables, mean, covariance):
"""
Parameters
----------
variables: iterable of any hashable python object
The variables for which the distribution is defined.
mean: n x 1, array like
n-dimensional vector where n is the number of variables.
covariance: n x n, 2-d array like
n x n dimensional matrix where n is the number of variables.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.mean
array([[ 1],
[-3],
[ 4]])
>>> dis.covariance
array([[4, 2, -2],
[2, 5, -5],
[-2, -5, 8]])
>>> dis.pdf([0,0,0])
0.0014805631279234139
"""
no_of_var = len(variables)
if len(mean) != no_of_var:
raise ValueError("Length of mean_vector must be equal to the\
number of variables.")
self.mean = np.asarray(np.reshape(mean, (no_of_var, 1)), dtype=float)
self.covariance = np.asarray(covariance, dtype=float)
self._precision_matrix = None
if self.covariance.shape != (no_of_var, no_of_var):
raise ValueError("The Covariance matrix should be a square matrix with order equal to\
the number of variables. Got: {got_shape}, Expected: {exp_shape}".format
(got_shape=self.covariance.shape, exp_shape=(no_of_var, no_of_var)))
super(JointGaussianDistribution, self).__init__(variables, None)
@property
def pdf(self):
return lambda *args: multivariate_normal.pdf(args, self.mean.reshape(1, len(self.variables))[0],
self.covariance)
@property
def precision_matrix(self):
"""
Returns the precision matrix of the distribution.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis.precision_matrix
array([[ 0.3125 , -0.125 , 0. ],
[-0.125 , 0.58333333, 0.33333333],
[ 0. , 0.33333333, 0.33333333]])
"""
if self._precision_matrix is None:
self._precision_matrix = np.linalg.inv(self.covariance)
return self._precision_matrix
def marginalize(self, variables, inplace=True):
"""
Modifies the distribution with marginalized values.
Parameters
----------
variables: iterator
List of variables over which marginalization is to be done.
inplace: boolean
If inplace=True it will modify the distribution itself,
else would return a new distribution.
Returns
-------
JointGaussianDistribution or None :
if inplace=True (default) returns None
if inplace=False return a new JointGaussianDistribution instance
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.mean
array([[ 1],
[-3],
[ 4]])
>>> dis.covariance
array([[ 4, 2, -2],
[ 2, 5, -5],
[-2, -5, 8]])
>>> dis.marginalize(['x3'])
dis.variables
['x1', 'x2']
>>> dis.mean
array([[ 1],
[-3]])
>>> dis.covariance
array([[4, 2],
[2, 5]])
"""
if not isinstance(variables, list):
raise TypeError("variables: Expected type list or array-like,\
got type {var_type}".format(var_type=type(variables)))
phi = self if inplace else self.copy()
var_indexes = [phi.variables.index(var) for var in variables]
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in variables]
phi.variables = [phi.variables[index] for index in index_to_keep]
phi.mean = phi.mean[index_to_keep]
phi.covariance = phi.covariance[np.ix_(index_to_keep, index_to_keep)]
phi._precision_matrix = None
if not inplace:
return phi
def reduce(self, values, inplace=True):
"""
Reduces the distribution to the context of the given variable values.
The formula for the obtained conditional distribution is given by -
For,
.. math:: N(X_j | X_i = x_i) ~ N(mu_{j.i} ; sig_{j.i})
where,
.. math:: mu_{j.i} = mu_j + sig_{j, i} * {sig_{i, i}^{-1}} * (x_i - mu_i)
.. math:: sig_{j.i} = sig_{j, j} - sig_{j, i} * {sig_{i, i}^{-1}} * sig_{i, j}
Parameters
----------
values: list, array-like
A list of tuples of the form (variable_name, variable_value).
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new ContinuousFactor object.
Returns
-------
JointGaussianDistribution or None:
if inplace=True (default) returns None
if inplace=False returns a new JointGaussianDistribution instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.variables
['x1', 'x2', 'x3']
>>> dis.mean
array([[ 1.],
[-3.],
[ 4.]])
>>> dis.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
>>> dis.reduce([('x1', 7)])
>>> dis.variables
['x2', 'x3']
>>> dis.mean
array([[ 0.],
[ 1.]])
>>> dis.covariance
array([[ 4., -4.],
[-4., 7.]])
"""
if not isinstance(values, list):
raise TypeError("values: Expected type list or array-like,\
got type {var_type}".format(var_type=type(values)))
phi = self if inplace else self.copy()
var_to_reduce = [var for var, value in values]
# index_to_keep -> j vector
index_to_keep = [self.variables.index(var) for var in self.variables
if var not in var_to_reduce]
# index_to_reduce -> i vector
index_to_reduce = [self.variables.index(var) for var in var_to_reduce]
mu_j = self.mean[index_to_keep]
mu_i = self.mean[index_to_reduce]
x_i = np.array([value for var, value in values]).reshape(len(index_to_reduce), 1)
sig_i_j = self.covariance[np.ix_(index_to_reduce, index_to_keep)]
sig_j_i = self.covariance[np.ix_(index_to_keep, index_to_reduce)]
sig_i_i_inv = np.linalg.inv(self.covariance[np.ix_(index_to_reduce, index_to_reduce)])
sig_j_j = self.covariance[np.ix_(index_to_keep, index_to_keep)]
phi.variables = [self.variables[index] for index in index_to_keep]
phi.mean = mu_j + np.dot(np.dot(sig_j_i, sig_i_i_inv), x_i - mu_i)
phi.covariance = sig_j_j - np.dot(np.dot(sig_j_i, sig_i_i_inv), sig_i_j)
phi._precision_matrix = None
if not inplace:
return phi
def normalize(self, inplace=True):
phi = self if inplace else self.copy()
# The pdf of a Joint Gaussian distribution is always
# normalized. Hence, no changes.
if not inplace:
return phi
def copy(self):
"""
Return a copy of the distribution.
Returns
-------
JointGaussianDistribution: copy of the distribution
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> gauss_dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> copy_dis = gauss_dis.copy()
>>> copy_dis.variables
['x1', 'x2', 'x3']
>>> copy_dis.mean
array([[ 1],
[-3],
[ 4]])
>>> copy_dis.covariance
array([[ 4, 2, -2],
[ 2, 5, -5],
[-2, -5, 8]])
>>> copy_dis.precision_matrix
array([[ 0.3125 , -0.125 , 0. ],
[-0.125 , 0.58333333, 0.33333333],
[ 0. , 0.33333333, 0.33333333]])
"""
copy_distribution = JointGaussianDistribution(self.scope(), self.mean.copy(),
self.covariance.copy())
if self._precision_matrix is not None:
copy_distribution._precision_matrix = self._precision_matrix.copy()
return copy_distribution
def to_canonical_factor(self):
u"""
Returns an equivalent CanonicalFactor object.
The formulas for calculating the canonical factor parameters
for N(μ; Σ) = C(K; h; g) are as follows -
K = sigma^(-1)
h = sigma^(-1) * mu
g = -(0.5) * mu.T * sigma^(-1) * mu -
log((2*pi)^(n/2) * det(sigma)^(0.5))
where,
K,h,g are the canonical factor parameters
sigma is the covariance_matrix of the distribution,
mu is the mean_vector of the distribution,
mu.T is the transpose of the matrix mu,
and det(sigma) is the determinant of the matrix sigma.
Example
-------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> dis = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> phi = dis.to_canonical_factor()
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.K
array([[0.3125, -0.125, 0.],
[-0.125, 0.5833, 0.333],
[ 0., 0.333, 0.333]])
>>> phi.h
array([[ 0.6875],
[-0.54166],
[ 0.33333]])
>>> phi.g
-6.51533
"""
from pgmpy.factors.continuous import CanonicalFactor
mu = self.mean
sigma = self.covariance
K = self.precision_matrix
h = np.dot(K, mu)
g = -(0.5) * np.dot(mu.T, h)[0, 0] - np.log(
np.power(2 * np.pi, len(self.variables)/2) * np.power(abs(np.linalg.det(sigma)), 0.5))
return CanonicalFactor(self.scope(), K, h, g)
def _operate(self, other, operation, inplace=True):
"""
Gives the CanonicalFactor operation (product or divide) with
the other factor.
Parameters
----------
other: CanonicalFactor
The CanonicalFactor to be multiplied.
operation: String
'product' for multiplication operation and
'divide' for division operation.
Returns
-------
CanonicalFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new CanonicalFactor instance.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.continuous import JointGaussianDistribution as JGD
>>> dis1 = JGD(['x1', 'x2', 'x3'], np.array([[1], [-3], [4]]),
... np.array([[4, 2, -2], [2, 5, -5], [-2, -5, 8]]))
>>> dis2 = JGD(['x3', 'x4'], [1, 2], [[2, 3], [5, 6]])
>>> dis3 = dis1 * dis2
>>> dis3.covariance
array([[ 3.6, 1. , -0.4, -0.6],
[ 1. , 2.5, -1. , -1.5],
[-0.4, -1. , 1.6, 2.4],
[-1. , -2.5, 4. , 4.5]])
>>> dis3.mean
array([[ 1.6],
[-1.5],
[ 1.6],
[ 3.5]])
"""
phi = self.to_canonical_factor()._operate(
other.to_canonical_factor(), operation, inplace=False).to_joint_gaussian()
if not inplace:
return phi
| sandeepkrjha/pgmpy | pgmpy/factors/continuous/JointGaussianDistribution.py | Python | mit | 13,522 | ["Gaussian"] | fdf275bdd8112a4ecc72ae0601b24d5700a2245ad9f8f573e3651e1ebcf87aeb |
#!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
"""
This is a script that runs a simulation of a small OB model.
It consists of an input signal and some OB columns. A column is a set of one
glomerule, its connected mitral cells, and the granule cells.
Script Overview
---------------
1. Import the model and necessary stuff (like numpy)
2. Get the set of parameters (see inside the paramsets directory)
3. Initialize the different cell populations:
- glomeruli
- (synapses between granule and mitral)
- mitral cells
- granule cells
4. Connects the different cell populations:
- glomeruli and mitral cells
- mitral cells and granule cells
5. Set some monitors on the simulation
6. Run the simulation
7. Output simulation information and indexes.
8. Plots
"""
import brian_no_units
from brian import *
def main(args):
import model_utils as mutils
# Set the parameters from the specified file BEFORE any model.* import
import model
mutils.set_model_ps(args.psfile)
import numpy as np
import analysis
import plotting
from utils import print_dict, pairs
from scipy.signal import resample
from model.glomerule import Glomerule
from model.mitral_cells import MitralCells
from model.synapse import Synapse
from model.granule_cells import GranuleCells
# Reset old stuff from Brian memory
clear(erase=True, all=True)
defaultclock.reinit()
# Initialize random generator (necessary mainly for parallel simulations)
np.random.seed()
"""
Parameters
----------
Get the parameter values from the `ps` module, which in turn gets the values
from the file specified in parameters.py.
Set some aliases for the different cell population sizes.
Also check that there is an even number of cells for each column.
Finally set some simulation parameters.
"""
psmt = model.PARAMETERS['Mitral']
psgr = model.PARAMETERS['Granule']
pscommon = model.PARAMETERS['Common']
n_mitral = pscommon['N_mitral']
n_glomeruli = n_granule = n_subpop = pscommon['N_subpop']
# check to have an even number of mitral in each sub-population
assert n_mitral % n_subpop == 0, \
"N_mitral is not a multiple of the number of sub-populations N_subpop."
n_mitral_per_subpop = n_mitral/n_subpop
defaultclock.dt = pscommon['simu_dt']
simu_length = pscommon['simu_length']
"""
Population Initialization
-------------------------
1. glomeruli
2. synapses between granule and mitral cells
3. mitral cells
4. granule cells
"""
# Glomeruli
glom = Glomerule()
glom.add_eqs()
glom.make_pop(n_glomeruli*n_mitral_per_subpop)
# Synapses (granule -- mitral)
synexc = Synapse(synapse_type='exc') # excitatory synapse
synexc.set_eqs_model()
syninhib = Synapse(synapse_type='inhib') # inhibitory synapse
syninhib.set_eqs_model()
# Mitral cells
mt = MitralCells()
mt_supp_eqs = {'var': ['- I_syn', '- g_input*V'],
'eqs': [synexc.get_eqs_model(),
Equations("g_input : siemens*meter**-2")]}
mt.add_eqs(supp_eqs=mt_supp_eqs)
mt.make_pop(n_mitral)
mt.pop.V = (psmt['V_t'] - psmt['V_r'])*np.random.random_sample(np.shape(mt.pop.V)) \
+ psmt['V_r']
# Granule Cells
gr = GranuleCells()
gr_supp_eqs = {'var': ['-I_syn'],
'eqs': [syninhib.get_eqs_model()]}
gr.add_eqs(supp_eqs=gr_supp_eqs)
gr.make_pop(n_granule)
gr.pop.V_D = psgr['E_L']
gr.pop.V_S = psgr['E_L']
"""
Connecting Populations
----------------------
1. Glomeruli and mitral cells
2. Mitral cells and granule cells
"""
# Connecting mitral cells to glomeruli
glmt_connections = diag(ones(n_mitral))
# Glomeruli--Mitral interactions
@network_operation(when='start')
def mt_input():
mt.pop.g_input = dot(glom.pop.g, glmt_connections)
# Connecting sub-population of mitral cells to granule cells
mtgr_connections = mutils.intrapop_connections(n_mitral, n_granule, n_subpop, n_mitral_per_subpop)
# Inter subpopulation connectivities
inter_conn_rate = pscommon['inter_conn_rate']
inter_conn_strength = pscommon['inter_conn_strength']
homeostasy = pscommon['homeostasy']
mtgr_connections, grmt_connections = mutils.interpop_connections(mtgr_connections, n_mitral, n_subpop,
n_mitral_per_subpop, inter_conn_rate, inter_conn_strength,homeostasy)
# Mitral--Granule interactions
@network_operation(when='start')
def graded_synapse():
"""Computes granule and mitral s_syn"""
mt.pop.state('T')[:] = 0.
mt.pop.state('T')[mt.pop.get_refractory_indices()] = 1.
gr.pop.s_syn = dot(mt.pop.s, mtgr_connections)
mt.pop.s_syn = dot(gr.pop.s, grmt_connections)
@network_operation(when='start')
def sum_s():
"""Computes granule self s_syn (for its glomerular column only)"""
for subpop in xrange(n_subpop):
start = subpop*n_mitral_per_subpop
stop = start + n_mitral_per_subpop
gr.pop.s_syn_self[subpop] = sum(mt.pop.state('s')[start:stop])
@network_operation(when='after_groups')
def keep_reset():
mt.pop.state('V')[mt.pop.get_refractory_indices()] = psmt['V_r']
"""
Simulation Monitoring
---------------------
Monitor state variables for the different populations.
"""
glom_ps = ('g')
mt_ps = ('s', 's_syn', 'V')
gr_ps = ('V_D', 's_syn', 's', 's_syn_self')
# Simulation monitors
rec_neurons = True # Must be set to True if we want accurate MPS and STS
timestep = int(pscommon['resample_dt']/pscommon['simu_dt'])
monit_glom = mutils.monit(glom.pop, glom_ps, timestep, reclist=rec_neurons)
monit_mt = mutils.monit(mt.pop, mt_ps, timestep, reclist=rec_neurons, spikes=True)
monit_gr = mutils.monit(gr.pop, gr_ps, timestep)
"""
Running Simulation
------------------
Create Network object and put everything simulation related in it.
Then run this network.
"""
# Gathering simulation objects
netw = Network(glom.pop, mt.pop, gr.pop,
mt_input, graded_synapse, keep_reset, sum_s,
[m for m in monit_glom.values()],
[m for m in monit_mt.values()],
[m for m in monit_gr.values()])
# Simulation run
if args.no_brian_output:
report_output = None
else:
report_output = "text"
netw.run(simu_length, report=report_output)
"""
Information Output
------------------
"""
if args.full_ps:
print 'Full set of parameters:'
print_dict(model.PARAMETERS)
burnin = pscommon['burnin']
times = monit_gr['s'].times
sig_start = where(times > burnin)[0][0]
sts_indexes = {}
mps_indexes = {}
fftmax = {}
mps_indexes['whole'] = analysis.mps(monit_mt['V'], 0, n_mitral, sig_start)
gr_s_syn_self_whole = np.zeros(monit_gr['s_syn_self'][0].shape)
# MPS and STS computation for subpopulation
for subpop in xrange(n_subpop):
start = subpop*n_mitral_per_subpop
stop = start + n_mitral_per_subpop
sts = analysis.sts(monit_gr['s_syn_self'][subpop], monit_mt['spikes'], start, stop, sig_start, burnin)
sts_indexes[subpop] = sts
gr_s_syn_self_whole += monit_gr['s_syn_self'][subpop]
mps = analysis.mps(monit_mt['V'], start, stop, sig_start)
mps_indexes[subpop] = mps
# STS for the whole population
sts_indexes['whole'] = analysis.sts(gr_s_syn_self_whole, monit_mt['spikes'], 0, n_mitral, sig_start, burnin)
# FFT Max index
fftmax = analysis.fftmax(monit_gr['s_syn_self'], n_subpop, pscommon['resample_dt'], sig_start)
# Peak distances index
peak_distances = {}
if n_subpop > 1:
for sub_i, sub_j in pairs(n_subpop):
sig1 = monit_gr['s_syn_self'][sub_i]
sig2 = monit_gr['s_syn_self'][sub_j]
if not peak_distances.has_key(sub_i):
peak_distances[sub_i] = {}
pd_index = analysis.peak_dist_circ_index(sig1, sig2)
peak_distances[sub_i][sub_j] = {}
peak_distances[sub_i][sub_j]['mean'] = pd_index[0]
peak_distances[sub_i][sub_j]['disp'] = pd_index[1]
if not args.no_summary:
print '\nParameters: using', args.psfile
print 'Populations:', n_subpop, 'glomerular columns;',
print n_mitral, 'mitral cells;', n_granule, 'granule cells.'
print 'Times:', simu_length, 'of simulation; dt =', defaultclock.dt, '.'
print 'Indexes: STS =', sts_indexes, '\nMPS =', mps_indexes
print 'FFT peaks (Hz):', fftmax
print 'Peak distances index:', peak_distances
"""
Plotting
--------
Plot monitored variables and a scatter plot.
"""
if not args.no_plot:
# Raster plot
spikes_it = monit_mt['spikes'].it
plotting.raster_plot(spikes_it[0], spikes_it[1], mtgr_connections)
# Membrane potentials
if not rec_neurons: # if we only have a couple of recorded neurons
plotting.memb_plot_figure(monit_mt, monit_gr, rec_neurons, n_granule)
# Granule synapses
plotting.granule_figure(monit_gr, pscommon)
show()
"""
Simulation records
------------------
Put numpy arrays in var `results` to save them into the simulation record.
Note: the variable must be monitored by Brian.
"""
# Add parameters
ps_arrays = {'mtgr_connections': (mtgr_connections,
"Connection matrix from mitral (rows) to granules (columns)")}
# Add results
array_spikes_it = np.array((monit_mt['spikes'].it[0],
monit_mt['spikes'].it[1]))
results = {}
# Mean inputs
mean_inputs = np.ndarray((n_glomeruli, monit_glom['g'].values.shape[1]))
for glom in xrange(n_glomeruli):
start_subpop = glom*n_mitral_per_subpop
stop_subpop = start_subpop + n_mitral_per_subpop
mean_inputs[glom] = np.mean(monit_glom['g'].values[start_subpop:stop_subpop], axis=0)
# Mean membrane potentials
mean_memb_pot = np.ndarray((n_glomeruli*2, monit_mt['V'].values.shape[1]))
bin_interco_matrix = (mtgr_connections > 0.)
interco_neurons = (bin_interco_matrix.sum(axis=1) > 1)
for glom in xrange(n_glomeruli):
start_subpop = glom*n_mitral_per_subpop
stop_subpop = start_subpop + n_mitral_per_subpop
# Get subpopulation membrane potentials and interconnected neurons
subpop_memb_pot = monit_mt['V'].values[start_subpop:stop_subpop]
subpop_interco_neurons = interco_neurons[start_subpop:stop_subpop]
# Compute one mean for interconnected neurons and another for the other neurons
mean_pop = np.mean(subpop_memb_pot[~subpop_interco_neurons], axis=0)
mean_pop_interco = np.mean(subpop_memb_pot[subpop_interco_neurons], axis=0)
mean_memb_pot[glom*2] = mean_pop
mean_memb_pot[glom*2 + 1] = mean_pop_interco
results['data'] = {'spikes_it': [array_spikes_it,
"Spikes: one array for the neuron number, another one for the spike times."],
'input': [mean_inputs,
"Mean network input conductance value for each glomerule."],
's_granule': [monit_gr['s'].values,
"Variable 's' of the granules."],
's_syn_self': [monit_gr['s_syn_self'].values,
"Variable 's_syn' for the granule, without integrating the mitral 's' from other subpopulations."],
'mean_memb_pot': [mean_memb_pot,
"Mean membrane potential. For each subpop: one mean for the interconnected neurons and one mean for the non-interconnected neurons."]}
results['indexes'] = {'MPS': mps_indexes, 'STS': sts_indexes, 'FFTMAX': fftmax,
'peak_distances': peak_distances}
return {'set': model.PARAMETERS, 'arrays': ps_arrays}, results
if __name__ == '__main__':
# Argument parsing
from arg_parsers import SIM_PARSER
args = SIM_PARSER.parse_args()
# Run script
ps, res = main(args)
| neuro-lyon/multiglom-model | src/multiglom_network.py | Python | mit | 12,424 | ["Brian", "NEURON"] | 18bc96f03e1847c7ac04524bf41be322e6de88fb452dcb5cca1edde08f8b72a2 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import firefox_history
from tests.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Mozilla Firefox history database plugin."""
def testProcessPriorTo24(self):
"""Tests the Process function on a Firefox History database file."""
# This is probably version 23 but potentially an older version.
plugin = firefox_history.FirefoxHistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['places.sqlite'], plugin)
# The places.sqlite file contains 205 events (1 page visit,
# 2 x 91 bookmark records, 2 x 3 bookmark annotations,
# 2 x 8 bookmark folders).
# However there are three events that do not have a timestamp
# so the test file will show 202 extracted events.
self.assertEqual(storage_writer.number_of_events, 202)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
# Check the first page visited event.
expected_event_values = {
'data_type': 'firefox:places:page_visited',
'date_time': '2011-07-01 11:16:21.371935',
'host': 'news.google.com',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_VISITED,
'title': 'Google News',
'url': 'http://news.google.com/',
'visit_count': 1,
'visit_type': 2}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Check the first bookmark event.
expected_event_values = {
'data_type': 'firefox:places:bookmark',
'date_time': '2011-07-01 11:13:59.266344',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Check the second bookmark event.
expected_event_values = {
'data_type': 'firefox:places:bookmark',
'date_time': '2011-07-01 11:13:59.267198',
'places_title': (
'folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark'
'%2FfeedURI&maxResults=10&queryType=1'),
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'title': 'Recently Bookmarked',
'type': 'URL',
'url': (
'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder='
'TOOLBAR&sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation='
'livemark%2FfeedURI&maxResults=10&queryType=1'),
'visit_count': 0}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
# Check the first bookmark annotation event.
expected_event_values = {
'data_type': 'firefox:places:bookmark_annotation',
'date_time': '2011-07-01 11:13:59.267146',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[183], expected_event_values)
# Check another bookmark annotation event.
expected_event_values = {
'content': 'RecentTags',
'data_type': 'firefox:places:bookmark_annotation',
'date_time': '2011-07-01 11:13:59.267605',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED,
'title': 'Recent Tags',
'url': 'place:sort=14&type=6&maxResults=10&queryType=1'}
self.CheckEventValues(storage_writer, events[184], expected_event_values)
# Check the second last bookmark folder event.
expected_event_values = {
'data_type': 'firefox:places:bookmark_folder',
'date_time': '2011-03-21 10:05:01.553774',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[200], expected_event_values)
# Check the last bookmark folder event.
expected_event_values = {
'data_type': 'firefox:places:bookmark_folder',
'date_time': '2011-07-01 11:14:11.766851',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'title': 'Latest Headlines'}
self.CheckEventValues(storage_writer, events[201], expected_event_values)
def testProcessVersion25(self):
"""Tests the Process function on a Firefox History database file v 25."""
plugin = firefox_history.FirefoxHistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['places_new.sqlite'], plugin)
# The places.sqlite file contains 84 events:
# 34 page visits.
# 28 bookmarks
# 14 bookmark folders
# 8 annotations
self.assertEqual(storage_writer.number_of_events, 84)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
counter = collections.Counter()
for event in events:
event_data = self._GetEventDataOfEvent(storage_writer, event)
counter[event_data.data_type] += 1
self.assertEqual(counter['firefox:places:bookmark'], 28)
self.assertEqual(counter['firefox:places:page_visited'], 34)
self.assertEqual(counter['firefox:places:bookmark_folder'], 14)
self.assertEqual(counter['firefox:places:bookmark_annotation'], 8)
expected_event_values = {
'data_type': 'firefox:places:page_visited',
'date_time': '2013-10-30 21:57:11.281942',
'host': 'code.google.com',
'url': 'http://code.google.com/p/plaso',
'visit_count': 1,
'visit_type': 2}
self.CheckEventValues(storage_writer, events[10], expected_event_values)
if __name__ == '__main__':
unittest.main()
| kiddinn/plaso | tests/parsers/sqlite_plugins/firefox_history.py | Python | apache-2.0 | 5,892 | ["VisIt"] | b86510d401cebd0ac91b1dc304ad6f35245e58cf8deb5a2eb52a8e6bf9371f3f |
# Scan a genome database using BLAST
# Created by Bryan White, 2015
# input: list of FASTA files
# output: completed BLAST database
#
# Copyright (c) 2015-2016 Bryan White, [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import random
import subprocess
import textwrap
import distance
import time
import argparse
import pickle
## Local libraries
from blst_libs import tistamp, load_sequences, rand_subseq, sl_window, similar
start_time = time.time()
query_file = "query_targets.txt"
db_file = "target_genomes.txt"
output_path = "~/data_analysis/data/genome_assemblies/"
blast_path = "~/data_analysis/apps/ncbi-blast-2.2.31+/bin/"
seq_type = "blastn"
'''
parser = argparse.ArgumentParser(description='Scan genomes')
parser.add_argument('--sum', dest='accumulate',
help='sum the integers (default: find the max)')
'''
#args = parser.parse_args()
random_scan = 1
random_pairwise = 1
blast_scan = 0
mem_scan = 1
r_scan_size = 1000
sl_window_size = 1000
r_scan_n = 5000
merge_contigs = 1
seq_line_limit = 9999999999
sl_increment = int(0.75*sl_window_size)
use_fastcomp = 0
dist_pcnt = 0.30
sliding_window = 1
expand_size = 10
#pickle_cache = 'build'
clear_cache = 0
#pickle_cache = 1
print("random_scan\t" + str(random_scan))
print("blast_scan\t" + str(blast_scan))
print("mem_scan\t" + str(mem_scan))
print("r_scan_size\t" + str(r_scan_size))
print("r_scan_n\t" + str(r_scan_n))
print("merge_contigs\t" + str(merge_contigs))
print("seq_line_limit\t" + str(seq_line_limit))
print("sl_window_size\t" + str(sl_window_size))
print("sl_increment\t" + str(sl_increment))
print("Cache: " + str(clear_cache))
'''
-word_size <Integer, >=4>
Word size for wordfinder algorithm (length of best perfect match)
-gapopen <Integer>
Cost to open a gap
-gapextend <Integer>
Cost to extend a gap
-penalty <Integer, <=0>
Penalty for a nucleotide mismatch
-reward <Integer, >=0>
Reward for a nucleotide match
'''
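# The options documented above are not set explicitly in this script; if tuning is
# needed, they could be appended to the blastn command string built further below
# (values here are purely illustrative, not taken from this script), e.g.:
#   scandb_command += " -word_size 11 -reward 1 -penalty -3 -gapopen 5 -gapextend 2"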
# Load query file names
print(tistamp(1)+"\tReading queries...")
queries = []
with open(query_file) as inputfile:
for line in inputfile:
queries.append(line.strip().split(','))
# Load database file names
print(tistamp(1)+"\tReading databases...")
databases = []
with open(db_file) as inputfile:
for line in inputfile:
databases.append(line.strip().split(','))
# Start main loop of query sequence files
for query_file in queries:
query_file = ''.join(query_file[0])
# Begin analysis of query file
print(tistamp(1)+"\tLoading query file: "+query_file)
# Determine query file path
split_query = query_file.split('/')
query_file_name = split_query[-1]
split_query.pop()
query_path = '/'.join(split_query)
print(tistamp(1)+"\tQuery File path: " + query_path)
# Load sequences into dictionary
print(tistamp(1)+"\tLoading sequences...")
query_sequences = {}
pickle_query_file = os.path.expanduser(query_file+".pickle")
if clear_cache == 1:
if os.path.exists(pickle_query_file):
print(tistamp(1)+"\tClearing Cache..." + pickle_query_file)
os.unlink(pickle_query_file)
if os.path.exists(pickle_query_file):
print(tistamp(1)+"\tLoading Cache..." + pickle_query_file)
query_sequences = pickle.load( open( pickle_query_file, "rb" ) )
else:
print(tistamp(1)+"\tBuilding Cache..." + pickle_query_file)
query_sequences = load_sequences(query_file, merge_contigs, seq_line_limit)
pickle.dump(query_sequences, open( pickle_query_file, "wb" ) )
print(tistamp(1)+"\tLoaded "+str(len(query_sequences))+" sequences")
# Access first sequence in dict (should be merged)
query_sequence = next (iter (query_sequences.values()))
print(tistamp(1)+"\tGenerating "+str(r_scan_n)+" slices of size "+str(r_scan_size)+"...")
# Generate random query sequences
'''
query_seqs = {}
if(random_pairwise == 1):
for i in range(0,r_scan_n):
(sub_seq,q_pos) = rand_subseq(query_sequence, r_scan_size)
query_seqs[q_pos] = sub_seq
print(tistamp(1)+"\tCreated "+str(r_scan_n)+" random sub-sequences.")
'''
#elif(sliding_window == 1):
#query_seqs = sl_window(query_sequence, sl_window_size, sl_increment)
for database in databases:
database = ''.join(database[0])
database_file_name = os.path.expanduser(database)
print(tistamp(1)+"\tLoading database: "+ database)
# Load database sequences
db_sequences = {}
pickle_db_file = os.path.expanduser(database+".pickle")
if clear_cache == 1:
if os.path.exists(pickle_db_file):
print(tistamp(1)+"\tClearing Cache..." + pickle_db_file)
os.unlink(pickle_db_file)
if os.path.exists(pickle_db_file):
print(tistamp(1)+"\tLoading Cache..." + pickle_db_file)
db_sequences = pickle.load( open( pickle_db_file, "rb" ) )
else:
print(tistamp(1)+"\tBuilding Cache..." + pickle_db_file)
db_sequences = load_sequences(database, merge_contigs, seq_line_limit)
pickle.dump(db_sequences, open( pickle_db_file, "wb" ) )
db_sequence = next (iter (db_sequences.values()))
search_i = 1
found_seqs = {}
while True:
print(tistamp(1)+"\tSEARCH ROUND: " + str(search_i))
dup_hits_i = 0
hits_i = 0
dup_hits_list = []
blst_size_cutoff = float(r_scan_size - r_scan_size*dist_pcnt)
print(tistamp(1)+"\t"+str(blst_size_cutoff))
#r_scan_sub_file = os.path.expanduser(output_path+"r_scan_contigs.fas")
r_scan_sub_file = query_path+"/r_scan_contigs.fas"
scan_file = query_file # Actual file that will be blasted against the database
# Access first sequence in dict (should be merged)
query_sequence = next (iter (query_sequences.values()))
if search_i == 1:
print(tistamp(1)+"\tGenerating "+str(r_scan_n)+" slices of size "+str(r_scan_size)+"...")
r_scan_f = open(os.path.expanduser(r_scan_sub_file), 'w')
for i in range(0,r_scan_n):
(sub_seq,seq_start,seq_end) = rand_subseq(query_sequence, r_scan_size)
r_scan_f.write(">Sub_g:" + str(i) + "_" + str(seq_start) + "_" + str(seq_end) + "\n" + sub_seq + "\n")
print(tistamp(1)+"\tPrinted "+str(r_scan_n)+" random sub-sequences to "+r_scan_sub_file)
else:
print(tistamp(1)+"\tRefining query sequences from: " + str(len(found_seqs)))
current_expand_size = expand_size*search_i
print(tistamp(1)+"\tExpanding: "+ str(current_expand_size))
r_scan_f = open(os.path.expanduser(r_scan_sub_file), 'w')
for q_pos, q_end in found_seqs.items():
q_left = int(q_pos) - current_expand_size
q_right = int(q_end) + current_expand_size
query_seq = query_sequence[q_left:q_right]
r_scan_f.write(">Sub_g:" + str(i) + "_" + str(seq_start) + "_" + str(seq_end) + "\n" + query_seq + "\n")
#ldist = distance.levenshtein(q_seq,d_seq)
#else:
#ldist = distance.fast_comp(q_seq,d_seq)
r_scan_f.close()
# Reset found seqs
found_seqs = {}
# Set query file to sub sequence file
scan_file = r_scan_sub_file
print(tistamp(1)+"\tTargeting "+database+" with ")
blst_results_file = query_path+"/qry_"+query_file_name+"_results_"+tistamp(2)+".tsv"
blst_results_file = os.path.expanduser(blst_results_file)
print(tistamp(1)+"\tOutputing BLAST results to "+blst_results_file)
# Form blastn command
scandb_command = blast_path + seq_type + " -query " + os.path.expanduser(scan_file) \
+ " -db " + os.path.expanduser(''.join(database)) +\
" -outfmt \"6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore\"" +\
" -out " + blst_results_file
print(tistamp(1)+"\tRunning: ")
print(tistamp(1)+"\t"+scandb_command)
#sts = subprocess.Popen(scandb_command, shell=True).wait()
os.system(scandb_command)
blst_results = []
with open(blst_results_file) as inputfile:
for line in inputfile:
blst_results.append(line.strip().split('\t'))
#print("qseqid\tsseqid\tpident\tlength\tmismatch\tgapopen\tqstart\tqend\tsstart\tsend\tevalue\tbitscore")
# 0 1 2 3 4 5 6 7 8 9 10 11
# Store blast hits
blst_hits = {}
for result in blst_results:
q_sid = result[0]
if q_sid in blst_hits:
blst_hits[q_sid] += 1
else:
blst_hits[q_sid] = 1
# Find hits mapping to only 1 region
unique_hits = {}
for q_sid, count in blst_hits.items():
if count == 1:
unique_hits[q_sid] = 1
for unique_q_sid, count in unique_hits.items():
for result in blst_results:
q_sid = result[0]
split_id = q_sid.split('_')
q_start = split_id[2]
q_end = split_id[3]
q_length = result[3]
if float(q_length) >= blst_size_cutoff:
if q_sid == unique_q_sid:
print(tistamp(1)+"\tFound at: " + q_sid + ": "+ str(q_start) + " => " + str(q_end))
found_seqs[q_start] = q_end
# Iterate number of search rounds
search_i = search_i + 1
if len(found_seqs) == 0:
print(tistamp(1)+"\tDone")
break
for q_start, q_end in found_seqs.items():
print(q_start + " => " + q_end)
end_time = time.time()
print("Time: " + str(end_time - start_time))
| bpwhite/bioinformatics-toolbox | src/blst_scan_genomes.py | Python | gpl-3.0 | 9,596 | ["BLAST"] | 277e443463d9a36fc05f250ad1b07e5893a6f66d5f8d99ecd43ccdf111e99be6 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 25, 2014
@author: noe
'''
from __future__ import absolute_import
import unittest
from pyemma.util import statistics
import numpy as np
from six.moves import range
class TestStatistics(unittest.TestCase):
def assertConfidence(self, sample, alpha, precision):
conf = statistics.confidence_interval(sample, alpha)
n_in = 0.0
for i in range(len(sample)):
if sample[i] > conf[0] and sample[i] < conf[1]:
n_in += 1.0
assert(alpha - (n_in/len(sample)) < precision)
def test_confidence_interval(self):
# exponential distribution
self.assertConfidence(np.random.exponential(size=10000), 0.5, 0.01)
self.assertConfidence(np.random.exponential(size=10000), 0.8, 0.01)
self.assertConfidence(np.random.exponential(size=10000), 0.95, 0.01)
# Gaussian distribution
self.assertConfidence(np.random.normal(size=10000), 0.5, 0.01)
self.assertConfidence(np.random.normal(size=10000), 0.8, 0.01)
self.assertConfidence(np.random.normal(size=10000), 0.95, 0.01)
if __name__ == "__main__":
unittest.main()
| gph82/PyEMMA | pyemma/util/tests/statistics_test.py | Python | lgpl-3.0 | 1,944 | ["Gaussian"] | 8eeab67c3d4c1342f5d56c916e7f231a97226c120cecffa15e8d023c0f7d18cd |
__author__ = 'Tom Schaul, [email protected]'
from scipy import zeros, randn
from random import random, sample, gauss
from topology import TopologyEvolvable
class MaskedParameters(TopologyEvolvable):
""" A module with a binary mask that can disable (=zero) parameters.
If no maximum is set, the mask can potentially have all parameters enabled.
The maxComplexity represents the number of allowed enabled parameters. """
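# Illustration (not part of the original code): with maskableParams = [0.3, -1.2, 0.7]
# and mask = [True, False, True], _applyMask() writes [0.3, 0.0, 0.7] into the wrapped
# parameter container, so the masked entry is effectively disabled.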
maskFlipProbability = 0.05
mutationStdev = 0.1
# number of bits in the mask that can be maximally on at once (None = all)
# Note: there must always be at least one on
maxComplexity = None
# probability of mask bits being on in a random mask (subject to the constraint above)
maskOnProbability = 0.5
def __init__(self, pcontainer, **args):
TopologyEvolvable.__init__(self, pcontainer, **args)
if self.maxComplexity == None:
self.maxComplexity = self.pcontainer.paramdim
self.randomize()
self.maskableParams = self.pcontainer.params.copy()
self._applyMask()
def _applyMask(self):
""" apply the mask to the module. """
self.pcontainer._params[:] = self.mask*self.maskableParams
@property
def paramdim(self):
return sum(self.mask)
@property
def params(self):
""" returns an array with only the unmasked parameters """
x = zeros(self.paramdim)
paramcount = 0
for i in range(len(self.maskableParams)):
if self.mask[i] == True:
x[paramcount] = self.maskableParams[i]
paramcount += 1
return x
def _setParameters(self, x):
""" sets only the unmasked parameters """
paramcount = 0
for i in range(len(self.maskableParams)):
if self.mask[i] == True:
self.maskableParams[i] = x[paramcount]
paramcount += 1
self._applyMask()
def randomize(self, **args):
""" an initial, random mask (with random params)
with as many parameters enabled as allowed"""
self.mask = zeros(self.pcontainer.paramdim, dtype=bool)
onbits = []
for i in range(self.pcontainer.paramdim):
if random() > self.maskOnProbability:
self.mask[i] = True
onbits.append(i)
over = len(onbits) - self.maxComplexity
if over > 0:
for i in sample(onbits, over):
self.mask[i] = False
self.maskableParams = randn(self.pcontainer.paramdim)*self.stdParams
self._applyMask()
def topologyMutate(self):
""" flips some bits on the mask
(but do not exceed the maximum of enabled parameters). """
for i in range(self.pcontainer.paramdim):
if random() < self.maskFlipProbability:
self.mask[i] = not self.mask[i]
tooMany = sum(self.mask) - self.maxComplexity
for i in range(tooMany):
while True:
ind = int(random()*self.pcontainer.paramdim)
if self.mask[ind]:
self.mask[ind] = False
break
if sum(self.mask) == 0:
# CHECKME: minimum of one needs to be on
ind = int(random()*self.pcontainer.paramdim)
self.mask[ind] = True
self._applyMask()
def mutate(self):
""" add some gaussian noise to all parameters."""
# CHECKME: could this be partly outsourced to the pcontainer directly?
for i in range(self.pcontainer.paramdim):
self.maskableParams[i] += gauss(0, self.mutationStdev)
self._applyMask()
| daanwierstra/pybrain | pybrain/structure/evolvables/maskedparameters.py | Python | bsd-3-clause | 3,787 | ["Gaussian"] | 7e18ebfb8ea0d8ec62ed2375956c67a486a0f888734336912f84cf1a2a471b3c |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import thermostats_common
import numpy as np
import espressomd
import espressomd.accumulators
import espressomd.observables
class BrownianThermostat(ut.TestCase, thermostats_common.ThermostatsCommon):
"""Tests velocity distributions and diffusion for Brownian Dynamics"""
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.cell_system.set_regular_decomposition(use_verlet_lists=True)
system.cell_system.skin = 0
system.periodicity = [0, 0, 0]
def setUp(self):
np.random.seed(42)
self.system.integrator.set_brownian_dynamics()
def tearDown(self):
self.system.time_step = 1e-12
self.system.cell_system.skin = 0.0
self.system.part.clear()
self.system.auto_update_accumulators.clear()
self.system.thermostat.turn_off()
self.system.integrator.set_vv()
def check_vel_dist_global_temp(self, recalc_forces, loops):
"""Test velocity distribution for global temperature parameters.
Parameters
----------
recalc_forces : :obj:`bool`
True if the forces should be recalculated after every step.
loops : :obj:`int`
Number of sampling loops
"""
N = 200
system = self.system
system.time_step = 1.6
kT = 1.1
gamma = 3.5
system.thermostat.set_brownian(kT=kT, gamma=gamma, seed=41)
v_minmax = 5
bins = 4
error_tol = 0.01
self.check_global(
N, kT, loops, v_minmax, bins, error_tol, recalc_forces)
def test_vel_dist_global_temp(self):
"""Test velocity distribution for global Brownian parameters."""
self.check_vel_dist_global_temp(False, loops=200)
def test_vel_dist_global_temp_initial_forces(self):
"""Test velocity distribution for global Brownian parameters,
when using the initial force calculation.
"""
self.check_vel_dist_global_temp(True, loops=170)
@utx.skipIfMissingFeatures("THERMOSTAT_PER_PARTICLE")
def test_vel_dist_per_particle(self):
"""Test Brownian dynamics with particle-specific kT and gamma. Covers
all combinations of particle-specific gamma and temp set or not set.
"""
N = 400
system = self.system
system.time_step = 1.9
kT = 0.9
gamma = 3.2
gamma2 = 4.3
system.thermostat.set_brownian(kT=kT, gamma=gamma, seed=41)
loops = 200
v_minmax = 5
bins = 4
error_tol = 0.012
self.check_per_particle(
N, kT, gamma2, loops, v_minmax, bins, error_tol)
def test_msd_global_temp(self):
"""Tests diffusion via MSD for global gamma and temperature"""
gamma = 9.4
kT = 0.37
dt = 0.5
system = self.system
p = system.part.add(pos=(0, 0, 0))
system.time_step = dt
system.thermostat.set_brownian(kT=kT, gamma=gamma, seed=41)
system.cell_system.skin = 0.4
pos_obs = espressomd.observables.ParticlePositions(ids=(p.id,))
c_pos = espressomd.accumulators.Correlator(
obs1=pos_obs, tau_lin=16, tau_max=100., delta_N=1,
corr_operation="square_distance_componentwise",
compress1="discard1")
system.auto_update_accumulators.add(c_pos)
system.integrator.run(30000)
c_pos.finalize()
# Check MSD
msd = c_pos.result()
tau = c_pos.lag_times()
system.auto_update_accumulators.clear()
def expected_msd(x):
return 2. * kT / gamma * x
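# The expected MSD per Cartesian component follows the Einstein relation
# MSD(t) = 2 * D * t with diffusion coefficient D = kT / gamma for Brownian dynamics,
# which is what expected_msd() evaluates.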
for i in range(2, 6):
np.testing.assert_allclose(
msd[i], expected_msd(tau[i]), rtol=0.02)
def test_08__noise_correlation(self):
"""Checks that the Brownian noise is uncorrelated"""
system = self.system
system.time_step = 0.01
system.cell_system.skin = 0.1
kT = 3.2
system.thermostat.set_brownian(kT=kT, gamma=2.1, seed=17)
system.part.add(pos=np.zeros((2, 3)))
steps = int(1e4)
error_delta = 0.04
self.check_noise_correlation(kT, steps, error_delta)
if __name__ == "__main__":
ut.main()
|
pkreissl/espresso
|
testsuite/python/brownian_dynamics_stats.py
|
Python
|
gpl-3.0
| 4,981
|
[
"ESPResSo"
] |
6bc8cf7270a1329f25508092dce1a6019601d3bdcb82b672b5742717db5b56ce
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Error handlers for errors originating from the Submission systems.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "[email protected]"
__date__ = "May 2014"
from pymatgen.io.gwwrapper.scheduler_error_parsers import get_parser
from custodian.custodian import ErrorHandler
class SchedulerErrorHandler(ErrorHandler):
"""
Custodian error handler for scheduler related errors
    scheduler_adapter takes the scheduler; it should at least provide a .name attribute identifying the scheduler,
currently 'slurm' is supported.
If the scheduler adapter also provides the methods defined in CorrectorProtocolScheduler, problems can also be
fixed by .apply_corrections.
    If an application_adapter is also provided and it provides the methods defined in CorrectorProtocolApplication,
    problems can also be fixed at the level of the application, e.g. making the application require less memory.
"""
def __init__(self, scheduler_adapter, application_adapter=None, err_file='queue.err', out_file='queue.out',
run_err_file='run.err', batch_err_file='batch.err'):
self.scheduler_adapter = scheduler_adapter
self.application_adapter = application_adapter
self.err_file = err_file
self.out_file = out_file
self.run_err_file = run_err_file
self.batch_err_file = batch_err_file
self.errors = []
self.corrections = {}
def check(self):
"""
        Check for the defined errors, put all found errors in self.errors, and return
        True if any were found, False otherwise.
"""
parser = get_parser(self.scheduler_adapter.name, err_file=self.err_file, out_file=self.out_file,
run_err_file=self.run_err_file, batch_err_file=self.batch_err_file)
parser.parse()
self.errors = parser.errors
        return len(self.errors) > 0
def correct(self):
"""
For custodian compatibility
"""
self.return_corrections()
def return_corrections(self):
for error in self.errors:
            self.corrections.update({error: {'scheduler_adapter_solutions': [], 'application_adapter_solutions': []}})
self.corrections[error]['scheduler_adapter_solutions'].append(error.scheduler_adapter_solutions)
self.corrections[error]['application_adapter_solutions'].append(error.application_adapter_solutions)
return self.corrections
def apply_corrections(self):
"""
Method to directly apply the corrections.
"""
for error in self.errors:
for solution in error.scheduler_adapter_solutions:
if self.scheduler_adapter is not None:
                    if getattr(self.scheduler_adapter, solution[0].__name__)(solution[1]):
return True
for solution in error.application_adapter_solutions:
if self.application_adapter is not None:
                    if getattr(self.application_adapter, solution[0].__name__)(solution[1]):
return True
return False
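# --- Illustrative usage sketch, not part of the original module ---
# It shows the intended call sequence under the assumption of a minimal
# stand-in adapter (the hypothetical _DummySlurmAdapter below) exposing only
# the required .name attribute, and of queue.err/queue.out files being present
# in the working directory: check() parses the files and return_corrections()
# groups the proposed solutions per error.
if __name__ == '__main__':
    class _DummySlurmAdapter(object):
        name = 'slurm'

    handler = SchedulerErrorHandler(_DummySlurmAdapter(),
                                    err_file='queue.err', out_file='queue.out')
    if handler.check():
        print(handler.return_corrections())
    else:
        print('no scheduler errors detected')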
|
yanikou19/pymatgen
|
pymatgen/io/gwwrapper/scheduler_error_handlers.py
|
Python
|
mit
| 3,375
|
[
"pymatgen"
] |
a3913abe1f8848dd8dc5d195aed21f3990bd32e8a870f37addb2c347453a91c7
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
from espressomd import magnetostatics
from tests_common import generate_test_for_class
class MagnetostaticsInteractionsTests(ut.TestCase):
# Handle to espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
def setUp(self):
self.system.box_l = [10, 10, 10]
if not self.system.part.exists(0):
self.system.part.add(id=0, pos=(0.1, 0.1, 0.1), dip=(1.3, 2.1, -6))
if not self.system.part.exists(1):
self.system.part.add(id=1, pos=(0, 0, 0), dip=(7.3, 6.1, -4))
if espressomd.has_features(["DP3M"]):
test_DP3M = generate_test_for_class(
system, magnetostatics.DipolarP3M,
dict(prefactor=1.0, epsilon=0.0, inter=1000,
mesh_off=[0.5, 0.5, 0.5], r_cut=2.4, mesh=[8, 8, 8],
cao=1, alpha=12, accuracy=0.01, tune=False))
if espressomd.has_features(["DIPOLAR_DIRECT_SUM"]):
test_DdsCpu = generate_test_for_class(
system, magnetostatics.DipolarDirectSumCpu, dict(prefactor=3.4))
if espressomd.has_features("EXPERIMENTAL_FEATURES"):
test_DdsRCpu = generate_test_for_class(
system, magnetostatics.DipolarDirectSumWithReplicaCpu,
dict(prefactor=3.4, n_replica=2))
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/magnetostaticInteractions.py
|
Python
|
gpl-3.0
| 2,066
|
[
"ESPResSo"
] |
a6a7fa1c3c30be3c20e7daf453c5cfc1805f3318c48e4f05f0d44107bcc9fac8
|
import random
from _collections import OrderedDict
import quantities
import theano
from theano import tensor as TT
import numpy as np
from . import ensemble
from . import simplenode
from . import probe
from . import origin
from . import input
from . import subnetwork
from . import helpers
class Network(object):
def __init__(self, name, seed=None, fixed_seed=None, dt=.001):
"""Wraps an NEF network with a set of helper functions
for simplifying the creation of NEF models.
:param string name:
create and wrap a new Network with the given name.
:param int seed:
random number seed to use for creating ensembles.
This one seed is used only to start the
random generation process, so each neural group
created will be different.
"""
self.name = name
self.dt = dt
self.run_time = 0.0
self.seed = seed
self.fixed_seed = fixed_seed
# all the nodes in the network, indexed by name
self.nodes = {}
# the function call to run the theano portions of the model
self.theano_tick = None
# the list of nodes that have non-theano code
self.tick_nodes = []
self.random = random.Random()
if seed is not None:
self.random.seed(seed)
def add(self, node):
"""Add an arbitrary non-theano node to the network.
Used for inputs, SimpleNodes, and Probes. These nodes will be
added to the Theano graph if the node has an "update()" function,
but will also be triggered explicitly at every tick
via the node's `theano_tick()` function.
:param Node node: the node to add to this network
"""
# remake theano_tick function, in case the node has Theano updates
self.theano_tick = None
self.tick_nodes.append(node)
self.nodes[node.name] = node
def connect(self, pre, post, transform=None, weight=1,
index_pre=None, index_post=None, pstc=0.01,
func=None):
"""Connect two nodes in the network.
Note: cannot specify (transform) AND any of
(weight, index_pre, index_post).
*pre* and *post* can be strings giving the names of the nodes,
or they can be the nodes themselves (Inputs and Ensembles are
supported). They can also be actual Origins or Terminations,
or any combination of the above.
If transform is not None, it is used as the transformation matrix
for the new termination. You can also use *weight*, *index_pre*,
and *index_post* to define a transformation matrix instead.
*weight* gives the value, and *index_pre* and *index_post*
identify which dimensions to connect.
If *func* is not None, a new Origin will be created on the
pre-synaptic ensemble that will compute the provided function.
The name of this origin will be taken from the name of
the function, or *origin_name*, if provided. If an
origin with that name already exists, the existing origin
will be used rather than creating a new one.
:param string pre: Name of the node to connect from.
:param string post: Name of the node to connect to.
:param float pstc:
post-synaptic time constant for the neurotransmitter/receptor
on this connection
:param transform:
The linear transfom matrix to apply across the connection.
If *transform* is T and *pre* represents ``x``,
then the connection will cause *post* to represent ``Tx``.
Should be an N by M array, where N is the dimensionality
of *post* and M is the dimensionality of *pre*.
:type transform: array of floats
:param index_pre:
The indexes of the pre-synaptic dimensions to use.
Ignored if *transform* is not None.
See :func:`helpers.compute_transform()`
:param float weight:
Scaling factor for a transformation defined with
*index_pre* and *index_post*.
Ignored if *transform* is not None.
See :func:`helpers.compute_transform()`
:type index_pre: List of integers or a single integer
:param index_post:
The indexes of the post-synaptic dimensions to use.
Ignored if *transform* is not None.
See :func:`helpers.compute_transform()`
:type index_post: List of integers or a single integer
:param function func:
Function to be computed by this connection.
If None, computes ``f(x)=x``.
The function takes a single parameter ``x``, which is
the current value of the *pre* ensemble, and must return
either a float or an array of floats.
:param string origin_name:
Name of the origin to check for / create to compute
the given function.
Ignored if func is None. If an origin with this name already
exists, the existing origin is used
instead of creating a new one.
"""
# 1) pre = decoded, post = decoded
# - in this case, transform will be
# (post.dimensions x pre.origin.dimensions)
# - decoded_input will be (post.array_size x post.dimensions)
# reset timer in case the model has been run,
# as adding a new node requires rebuilding the theano function
self.theano_tick = None
# see if a termination name was specified
# right now only relevant for SimpleNodes
        post_split = post.split(':')
        post = post_split[0]
        if len(post_split) > 1:
            term_name = post_split[1]
# get post Node object from node dictionary
post = self.get_object(post)
# get the origin from the pre Node
pre_origin = self.get_origin(pre, func)
# get pre Node object from node dictionary
pre_name = pre
pre = self.get_object(pre)
# get decoded_output from specified origin
pre_output = pre_origin.decoded_output
dim_pre = pre_origin.dimensions
if transform is not None:
# make sure contradicting things aren't simultaneously specified
assert ((weight == 1) and (index_pre is None)
and (index_post is None))
if isinstance(post, simplenode.SimpleNode):
assert index_post is None
dim_post = post.dimensions[term_name]
else: dim_post = post.dimensions
# if decoded-decoded connection (case 1)
# compute transform if not given, if given make sure shape is correct
transform = helpers.compute_transform(
dim_pre=dim_pre,
dim_post=dim_post,
array_size=post.array_size,
weight=weight,
index_pre=index_pre,
index_post=index_post,
transform=transform)
# apply transform matrix, directing pre dimensions
# to specific post dimensions
decoded_output = TT.dot(transform, pre_output)
# pass in the pre population decoded output function
# to the post population, connecting them for theano
if isinstance(post, simplenode.SimpleNode):
post.set_input_source(name=term_name, pstc=pstc,
source=decoded_output)
else:
post.add_termination(name=pre_name, pstc=pstc,
decoded_input=decoded_output)
def connect_neurons(self, pre, post, weight_matrix, pstc=0.01,
func=None):
""" This function makes a connection to post-synaptic neurons
directly either from a pre-synaptic vector or neuron space,
depending on the shape of the weight matrix.
If weight_matrix is (post.neurons x pre.dim) then it connects
from the vector space of the pre-synaptic neuron, using the pre
synaptic decoders, but replacing the post-synaptic encoders.
If weight_matrix is (post.neurons x pre.neurons) then it connects
the neurons of the two populations directly together.
:param pre: pre-synaptic signal source
:type pre: Ensemble, Input, SimpleNode
note if neuron - neuron connection must be type Ensemble
:param Ensemble post: post-synaptic population of neurons
:param weight_matrix: set of connection weight strengths
:type weight_matrix: numpy.array, list
:param function func:
Not for use on neuron-neuron connections, only vector-neurons connections.
Function of the decoded origin to be the presynaptic connection.
If None, default origin used.
The function takes a single parameter ``x``, which is
the current value of the *pre* ensemble, and must return
either a float or an array of floats.
"""
post = self.get_object(post)
# get the origin from the pre Node
pre_origin = self.get_origin(pre, func)
# get pre Node object from node dictionary
pre_name = pre
pre = self.get_object(pre)
# get decoded_output from specified origin
pre_output = pre_origin.decoded_output
dim_pre = pre_origin.dimensions
weight_matrix = np.asarray(weight_matrix)
# make sure the weight_matrix is in the right form
if weight_matrix.shape[0] == post.array_size * post.neurons_num:
weight_matrix = weight_matrix.reshape(
[post.array_size, post.neurons_num] +\
list(weight_matrix.shape[1:]))
if len(weight_matrix.shape) == 2: # repeat array_size times
weight_matrix = np.tile(weight_matrix, (post.array_size, 1, 1))
# there are 2 cases
# 3) pre = encoded, post = encoded
# - in this case, weight_matrix will be (post.array_size x
# post.neurons_num x pre.array_size x pre.neurons_num)
# - encoded_input will be (post.array_size x post.neurons_num)
# check for pre side encoded connection (case 3)
if len(weight_matrix.shape) > 3 or \
weight_matrix.shape[2] == pre.array_size * pre.neurons_num:
if weight_matrix.shape[2] == pre.array_size * pre.neurons_num:
weight_matrix = weight_matrix.reshape(
[post.array_size, post.neurons_num,
pre.array_size, pre.neurons_num])
assert weight_matrix.shape == \
(post.array_size, post.neurons_num,
pre.array_size, pre.neurons_num)
# can't specify a function in this case
assert func == None
# get spiking output from pre population
pre_output = pre.neurons.output
encoded_output = (weight_matrix * pre_output)
# sum the contribution from all pre neurons
# for each post neuron
encoded_output = np.sum(encoded_output, axis=3)
# sum the contribution from each of the
# pre arrays for each post neuron
encoded_output = np.sum(encoded_output, axis=2)
# pass in the pre population encoded output function
# to the post population, connecting them for theano
post.add_termination(name=pre_name, pstc=pstc,
encoded_input=encoded_output)
return
# else
# 2) pre = decoded, post = encoded
# - in this case, weight_matrix will be size
# (post.array_size x post.neurons x pre.origin.dimensions)
# - encoded_input will be (post.array_size x post.neurons_num)
assert weight_matrix.shape == \
(post.array_size, post.neurons_num, dim_pre)
encoded_output = TT.zeros((post.array_size, post.neurons_num),
dtype='float32')
for ii in xrange(post.neurons_num):
encoded_output = TT.set_subtensor(encoded_output[:, ii],
TT.dot(weight_matrix[:, ii], pre_output))
# pass in the pre population encoded output function
# to the post population, connecting them for theano
post.add_termination(name=pre_name, pstc=pstc,
encoded_input=encoded_output)
def get_object(self, name):
"""This is a method for parsing input to return the proper object.
The only thing we need to check for here is a ':',
indicating an origin.
:param string name: the name of the desired object
"""
assert isinstance(name, str)
# separate into node and origin, if specified
split = name.split(':')
if len(split) == 1:
# no origin specified
return self.nodes[name]
elif len(split) == 2:
# origin specified
node = self.nodes[split[0]]
return node.origin[split[1]]
def get_origin(self, name, func=None):
"""This method takes in a string and returns the decoded_output function
of this object. If no origin is specified in name then 'X' is used.
:param string name: the name of the object(and optionally :origin) from
which to take decoded_output from
:returns: specified origin
"""
obj = self.get_object(name) # get the object referred to by name
if not isinstance(obj, origin.Origin):
# if obj is not an origin, find the origin
# the projection originates from
# take default identity decoded output from obj population
origin_name = 'X'
if func is not None:
# if this connection should compute a function
# set name as the function being calculated
origin_name = func.__name__
#TODO: better analysis to see if we need to build a new origin
# (rather than just relying on the name)
if origin_name not in obj.origin:
# if an origin for this function hasn't already been created
# create origin with to perform desired func
obj.add_origin(origin_name, func, dt=self.dt)
obj = obj.origin[origin_name]
else:
# if obj is an origin, make sure a function wasn't given
# can't specify a function for an already created origin
assert func == None
return obj
def learn(self, pre, post, error, pstc=0.01, **kwargs):
"""Add a connection with learning between pre and post,
modulated by error. Error can be a Node, or an origin. If no
origin is specified in the format node:origin, then 'X' is used.
:param Ensemble pre: the pre-synaptic population
:param Ensemble post: the post-synaptic population
:param Ensemble error: the population that provides the error signal
:param list weight_matrix:
the initial connection weights with which to start
"""
pre_name = pre
pre = self.get_object(pre)
post = self.get_object(post)
error = self.get_origin(error)
return post.add_learned_termination(name=pre_name, pre=pre,
error=error, pstc=pstc, dt=self.dt, **kwargs)
def make(self, name, *args, **kwargs):
"""Create and return an ensemble of neurons.
Note that all ensembles are actually arrays of length 1.
:param string name: name of the ensemble (must be unique)
:param int seed:
Random number seed to use.
If this is None and the Network was constructed
with a seed parameter, a seed will be randomly generated.
:returns: the newly created ensemble
"""
if 'seed' not in kwargs.keys():
if self.fixed_seed is not None:
kwargs['seed'] = self.fixed_seed
else:
# if no seed provided, get one randomly from the rng
kwargs['seed'] = self.random.randrange(0x7fffffff)
# just in case the model has been run previously,
# as adding a new node means we have to rebuild
# the theano function
self.theano_tick = None
kwargs['dt'] = self.dt
e = ensemble.Ensemble(*args, **kwargs)
# store created ensemble in node dictionary
if kwargs.get('mode', None) == 'direct':
self.tick_nodes.append(e)
self.nodes[name] = e
return e
def make_array(self, name, neurons, length, dimensions=1, **kwargs):
"""Generate a network array specifically.
        This function is deprecated; use it only for legacy code
or non-theano API compatibility.
"""
return self.make(
name=name, neurons=neurons, dimensions=dimensions,
array_size=length, **kwargs)
def make_input(self, *args, **kwargs):
"""Create an input and add it to the network."""
i = input.Input(*args, **kwargs)
self.add(i)
return i
def make_subnetwork(self, name):
"""Create a subnetwork. This has no functional purpose other than
to help organize the model. Components within a subnetwork can
be accessed through a dotted name convention, so an element B inside
a subnetwork A can be referred to as A.B.
:param name: the name of the subnetwork to create
"""
return subnetwork.SubNetwork(name, self)
def make_probe(self, target, name=None, dt_sample=0.01,
data_type='decoded', **kwargs):
"""Add a probe to measure the given target.
:param target: a Theano shared variable to record
:param name: the name of the probe
:param dt_sample: the sampling frequency of the probe
:returns: The Probe object
"""
i = 0
target_name = target + '-' + data_type
while name is None or self.nodes.has_key(name):
i += 1
name = ("Probe%d" % i)
# get the signal to record
if data_type == 'decoded':
target = self.get_origin(target).decoded_output
elif data_type == 'spikes':
target = self.get_object(target)
# check to make sure target is an ensemble
assert isinstance(target, ensemble.Ensemble)
target = target.neurons.output
# set the filter to zero
kwargs['pstc'] = 0
p = probe.Probe(name=name, target=target, target_name=target_name,
dt_sample=dt_sample, **kwargs)
self.add(p)
return p
def make_theano_tick(self):
"""Generate the theano function for running the network simulation.
:returns: theano function
"""
# dictionary for all variables
# and the theano description of how to compute them
updates = OrderedDict()
# for every node in the network
for node in self.nodes.values():
# if there is some variable to update
if hasattr(node, 'update'):
# add it to the list of variables to update every time step
updates.update(node.update(self.dt))
# create graph and return optimized update function
return theano.function([], [], updates=updates.items())#, mode='ProfileMode')
def run(self, time):
"""Run the simulation.
If called twice, the simulation will continue for *time*
more seconds. Note that the ensembles are simulated at the
dt timestep specified when they are created.
:param float time: the amount of time (in seconds) to run
:param float dt: the timestep of the update
"""
# if theano graph hasn't been calculated yet, retrieve it
if self.theano_tick is None:
self.theano_tick = self.make_theano_tick()
for i in range(int(time / self.dt)):
# get current time step
t = self.run_time + i * self.dt
# run the non-theano nodes
for node in self.tick_nodes:
node.t = t
node.theano_tick()
# run the theano nodes
self.theano_tick()
if i % 1000 == 0: print 'time: ', t, 's'
# update run_time variable
self.run_time += time
def write_data_to_hdf5(self, filename='data'):
"""This is a function to call after simulation that writes the
data of all probes to filename using the Neo HDF5 IO module.
:param string filename: the name of the file to write out to
"""
import neo
from neo import hdf5io
# get list of probes
probe_list = [self.nodes[node] for node in self.nodes
if node[:5] == 'Probe']
# if no probes then just return
if len(probe_list) == 0: return
# open up hdf5 file
if not filename.endswith('.hd5'): filename += '.hd5'
iom = hdf5io.NeoHdf5IO(filename=filename)
#TODO: set up to write multiple trials/segments to same block
# for trials run at different points
# create the all encompassing block structure
block = neo.Block()
# create the segment, representing a trial
segment = neo.Segment()
# put the segment in the block
block.segments.append(segment)
# create the appropriate Neo structures from the Probes data
#TODO: pair any analog signals and spike trains from the same
# population together into a RecordingChannel
for probe in probe_list:
# decoded signals become AnalogSignals
if probe.target_name.endswith('decoded'):
segment.analogsignals.append(
neo.AnalogSignal(
probe.get_data() * quantities.dimensionless,
sampling_period=probe.dt_sample * quantities.s,
target_name=probe.target_name) )
# spikes become spike trains
elif probe.target_name.endswith('spikes'):
# have to change spike train of 0s and 1s to list of times
for neuron in probe.get_data().T:
segment.spiketrains.append(
neo.SpikeTrain(
[
t * probe.dt_sample
for t, val in enumerate(neuron[0])
if val > 0
] * quantities.s,
t_stop=len(probe.data),
target_name=probe.target_name) )
else:
print 'Do not know how to write %s to NeoHDF5 file'%probe.target_name
assert False
# write block to file
iom.save(block)
# close up hdf5 file
iom.close()
def set_alias(self, alias, node):
"""Adds a named shortcut to an existing node within this network to be
used to simplify connect() calls.
For example, you can do::
net.set_alias('vision','A.B.C.D.E')
net.set_alias('motor','W.X.Y.Z')
net.connect('vision','motor')
:param string alias: the new short name to create
:param string node: the existing node name
"""
self.nodes[alias] = self.nodes[node]
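# --- Illustrative usage sketch, not part of the original module ---
# Assuming theano and the package's other dependencies are available, a
# minimal model creates two ensembles, connects them through a decoded
# function and probes the result; the names 'A' and 'B' and the neuron
# counts are arbitrary choices made only for this sketch.
def _example_model():
    def square(x):
        return x * x
    net = Network('example-net', seed=11)
    net.make('A', neurons=40, dimensions=1)
    net.make('B', neurons=40, dimensions=1)
    # decoded-decoded connection computing square(x) of the pre ensemble
    net.connect('A', 'B', func=square, pstc=0.01)
    probe = net.make_probe('B', dt_sample=0.01)
    net.run(0.05)
    return probe.get_data()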
|
ctn-waterloo/nengo_theano
|
nengo_theano/network.py
|
Python
|
mit
| 23,984
|
[
"NEURON"
] |
93d54c17c84fd58a86f6616434194ea0b0b4d9f6d4395475a3c65a0e9042c400
|
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# This module contains global data
#
from ige.ospace import Const
app = None
mainGameDlg = None
updateDlgs = []
cmdInProgress = 0
# i18n (delayed translation)
def N_(msg): return msg
planetTypes = {
'A': N_('Asteroid'),
'G': N_('Gas Giant'),
'R': N_('Rock'),
'C': N_('Cold'),
'D': N_('Desert'),
'H': N_('Hostile'),
'M': N_('Marginal'),
'E': N_('Terrestrial'),
'I': N_('Gaia'),
None: N_('Unknown'),
}
starTypes = {
'c': N_('Super giant'),
'g': N_('Giant'),
'm': N_('Main sequence'),
'd': N_('Dwarf'),
'b': N_('Black hole'),
'n': N_('Neutron star'),
'w': N_('Worm hole'),
}
shipClasses = {
0: N_('small'),
1: N_('medium'),
2: N_('large'),
3: N_('planet'),
}
fleetActions = {
0: N_('None'),
1: N_('Move to'),
1000: N_("Declare War at"),
1002: N_("Deploy Ship"),
1003: N_("Redirect to"),
1004: N_("Refuel at"),
1005: N_("Repeat from"),
1006: N_("Wait"),
1007: N_("Enter Wormhole at"),
}
stratRes = {
0: N_("None"),
1: N_("Uranium"),
2: N_("Titanium"),
3: N_("Chromium"),
4: N_("Silicium"),
5: N_("Carboneum"),
6: N_("Antimatter"),
7: N_("Plutonium"),
8: N_("Wolframium"),
100: N_("Mutagen"),
1000: N_("Unnilseptium")
}
relationNames = [N_("Enemy"), N_("Unfriendly"), N_("Unfriendly"), N_("Neutral"),
N_("Neutral"), N_("Friendly"), N_("Friendly"), N_("Allied")]
pactNames = {
Const.PACT_ALLOW_CIVILIAN_SHIPS: N_("Passage for civilian ships"),
Const.PACT_ALLOW_MILITARY_SHIPS: N_("Passage for military ships"),
Const.PACT_ALLOW_TANKING: N_("Permission to tank ships"),
Const.PACT_MINOR_CP_COOP: N_("Limited trade agreement"),
Const.PACT_MAJOR_CP_COOP: N_("Unlimited trade agreement"),
Const.PACT_SHARE_SCANNER: N_("Scanner map exchange"),
Const.PACT_MINOR_SCI_COOP: N_("Research information exchange"),
Const.PACT_MAJOR_SCI_COOP: N_("Research cooperation"),
}
pacts = [
Const.PACT_ALLOW_CIVILIAN_SHIPS,
Const.PACT_ALLOW_MILITARY_SHIPS,
Const.PACT_ALLOW_TANKING,
Const.PACT_MINOR_CP_COOP,
Const.PACT_SHARE_SCANNER,
Const.PACT_MINOR_SCI_COOP,
Const.PACT_MAJOR_CP_COOP,
Const.PACT_MAJOR_SCI_COOP,
]
pactStates = [N_("Disabled"), N_("Enabled"), N_("Active")]
mailboxSpec = {
(Const.T_PLAYER, "INBOX"): (N_("Mailbox"), "sender"),
(Const.T_PLAYER, "OUTBOX"): (N_("Sent"), None),
(Const.T_PLAYER, "EVENTS"): (N_("Events"), None),
(Const.T_UNIVERSE, "NEWS"): (N_("News"), None),
(Const.T_UNIVERSE, "PUBLIC"): (N_("Public"), "forum"),
(Const.T_GALAXY, "PUBLIC"): (N_("Public"), "forum"),
(Const.T_GALAXY, "NEWS"): (N_("News"), None),
}
def mailboxStripLang(forum):
if "_" in forum:
return forum.split("_")[1]
else:
return forum
# morale states
moraleStates = {
-0.875: N_("MORALE IS TERRIBLE"),
-0.75: N_("MORALE IS TERRIBLE"),
-0.625: N_("MORALE IS TERRIBLE"),
-0.50: N_("MORALE IS BAD"),
-0.375: N_("MORALE IS VERY LOW"),
-0.25: N_("MORALE IS LOW"),
-0.125: N_("MORALE IS BELOW AVERAGE"),
0.0: N_(" "),
0.125: N_("MORALE IS HIGH"),
0.25: N_("MORALE IS SUPERB"),
}
# severity
CRI = 3
MAJ = 2
MIN = 1
INFO = 0
NONE = INFO
DISABLED = -1
msgSeverity = {
CRI: N_("Critical"),
MAJ: N_("Major"),
MIN: N_("Minor"),
INFO: N_("Info"),
DISABLED: N_("Disabled"),
}
sevColors = {
CRI: (0xff, 0x80, 0x80),
MAJ: (0xff, 0xff, 0x00),
MIN: None,
NONE: (0xc0, 0xc0, 0xc0),
DISABLED: (0x80, 0x80, 0x80),
}
gameScenarios = {
Const.SCENARIO_SINGLE: N_("Single"),
Const.SCENARIO_COOP: N_("Cooperative"),
Const.SCENARIO_BRAWL: N_("Brawl"),
Const.SCENARIO_OUTERSPACE: N_("Outer Space")
}
gameScenarioDescriptions = {
Const.SCENARIO_SINGLE: N_("Single player game to enjoy freebuilding, learn the game, and try new strategies. Usually some AI opponents try to slow you down. You can pause galaxy any time, as well as finish it. Endless game, set your own goals."),
Const.SCENARIO_COOP: N_("Cooperative game pits you and other commanders against strong AI opposition. Learn how to play along other players to achieve common goal. Cooperation is not enforced by game, but it is recommended to utilize all pacts available. Game ends when AI empires cease to exist."),
Const.SCENARIO_BRAWL: N_("Brawl is a mode where you fight other commanders for supremacy. There is no other way to win, than to conquer everything. Usually there are no agressive AI present. You win, when you are the only human commander in the galaxy."),
Const.SCENARIO_OUTERSPACE: N_("Original and complete mode. True experience, full of fights between commanders, sprawling AI, diplomacy and backstabbing. Recommended to veteran players with good grasp of game concepts. To win this game, you have to convince others to vote for you, and if you have enough backing, elect you to become the Imperator of the galaxy.")
}
galaxyTypeDescriptions = {
'Circle1SP': N_("Basic training galaxy, with mutant as the only agressive enemy. Recommended for new players."),
'Circle2CP': N_("Cooperative galaxy, where you and another commander fend off and defeat sprawling mutant menace. Recommended to inexperienced players."),
'Circle3BP': N_("Tiny galaxy to brawl with two other commanders. Tactics prevail here, as there is not enough planets to make long term strategies viable."),
'Circle3SP': N_("More complex single player galaxy, with classic starting group of three commanders. Mutant is the only agressive enemy, two friendly Rebels start in the vicinity."),
'Circle3CP': N_("Cooperative galaxy, where you and two other commanders fend off and defeat sprawling mutant menace. Recommended to inexperienced players."),
'Circle5BP': N_("Small galaxy to brawl with four other commanders. Trust no one and keep watching your back."),
'Circle9P': N_("Smallest galaxy that contains full diplomacy rules set, limited strategic resources, and fully implements game mechanics. This galaxy is recommended for beginners who seek more casual gameplay with other like minded players."),
'Circle42P': N_("Original galaxy, place of epic battles and complex intrigues. Recommended only to the experienced players. It may become time consuming."),
'Circle65P': N_("Majestic galaxy of unmatched size. Be prepared to work primarily through diplomacy, as management of huge empire required for conquest would take all your time. Only for veteran players of many galaxies."),
}
gameChallenges = {
Const.T_AIPLAYER: N_("Rebel"),
Const.T_AIRENPLAYER: N_("Renegade"),
Const.T_AIMUTPLAYER: N_("Mutant"),
Const.T_AIPIRPLAYER: N_("Pirate"),
Const.T_AIEDENPLAYER: N_("EDEN"),
}
# StarMapWidget overlays
OVERLAY_OWNER = "owner"
OVERLAY_DIPLO = "diplomacy"
OVERLAY_BIO = "bio"
OVERLAY_FAME = "fame"
OVERLAY_MIN = "min"
OVERLAY_SLOT = "slot"
OVERLAY_STARGATE = "stargate"
OVERLAY_DOCK = "dock"
OVERLAY_MORALE = "morale"
OVERLAY_PIRATECOLONYCOST = "piratecolony"
OVERLAY_TYPES = [OVERLAY_OWNER, OVERLAY_DIPLO, OVERLAY_BIO, OVERLAY_FAME, OVERLAY_MIN, OVERLAY_SLOT, OVERLAY_STARGATE, OVERLAY_DOCK, OVERLAY_MORALE, OVERLAY_PIRATECOLONYCOST]
# colors
playerHighlightColor = (0xff, 0xa5, 0x4d)
playersHighlightColorsOld = {
32935: (0x55, 0x55, 0x55), # plasmon
32964: (0x00, 0xa0, 0xa0), # niki
32921: (0xaa, 0xaa, 0xaa), # medved
33216: (0xff, 0x00, 0x00), # artanis
32917: (0x88, 0x00, 0x00), # johanka
33400: (0xaa, 0x00, 0x00), # lev
33166: (0x33, 0x00, 0x00), # pedasr
32606: (0x00, 0x55, 0x55), # starlord
33266: (0x00, 0xaa, 0xff), # mcc
}
playersHighlightColors = {
}
objectFocus = {
}
savePassword = False
# i18n
del N_
|
ospaceteam/outerspace
|
client/osci/gdata.py
|
Python
|
gpl-2.0
| 8,560
|
[
"Galaxy"
] |
25c497eca7cf3eaca4c954913db32425b1538a48edbe36e55b54998a6f8cb3e6
|
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import glob
import MooseDocs
from MooseDocs.MooseMarkdown import MooseMarkdown
from MooseDocs.extensions.app_syntax import AppSyntaxExtension
def get_default_groups():
"""
Return the application group.
"""
if MooseDocs.ROOT_DIR == MooseDocs.MOOSE_DIR:
return []
else:
filenames = glob.glob(os.path.join(MooseDocs.ROOT_DIR, 'include', 'base', '*App.h'))
if not filenames:
return []
return [os.path.basename(filenames[0][:-5]).lower()]
return []
def check_options(parser):
"""
Command-line options for check command.
"""
parser.add_argument('--config-file', type=str, default='website.yml',
help="The configuration file to use for building the documentation using "
"MOOSE. (Default: %(default)s)")
parser.add_argument('--template', type=str, default='website.html',
help="The template html file to utilize (default: %(default)s).")
parser.add_argument('--generate', action='store_true',
help="When checking the application for complete documentation generate "
"any missing markdown documentation files.")
parser.add_argument('--update', action='store_true',
help="When checking the application for complete documentation generate "
"any missing markdown documentation files and update the stubs for "
"files that have not been modified.")
parser.add_argument('--dump', action='store_true',
help="Dump the complete MooseDocs syntax tree to the screen.")
parser.add_argument('--groups', default=get_default_groups(),
help="Specify the groups to consider in the check, by default only the "
"documentation for the application is considered, providing an empty "
"list will check all groups (default: %(default)s).")
def check(config_file=None, generate=None, update=None, dump=None, template=None, groups=None,
**template_args):
"""
Performs checks and optionally generates stub pages for missing documentation.
"""
# Create the markdown parser and get the AppSyntaxExtension
config = MooseDocs.load_config(config_file, template=template, template_args=template_args)
parser = MooseMarkdown(config)
ext = parser.getExtension(AppSyntaxExtension)
syntax = ext.getMooseAppSyntax()
# Dump the complete syntax tree if desired
if dump:
print syntax
# Check all nodes for documentation
for node in syntax.findall():
node.check(ext.getConfig('install'), generate=generate, groups=groups, update=update)
return 0
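# --- Illustrative usage sketch, not part of the original module ---
# Assuming a MOOSE application checkout containing a website.yml
# configuration, check() can also be driven programmatically; the keyword
# values mirror the command-line options declared in check_options() above.
if __name__ == '__main__':
    check(config_file='website.yml', template='website.html',
          generate=False, update=False, dump=False,
          groups=get_default_groups())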
|
liuwenf/moose
|
python/MooseDocs/commands/check.py
|
Python
|
lgpl-2.1
| 4,200
|
[
"MOOSE"
] |
e4aad2de50566454d8795fe8b4b33afb14b57ba7dd9b01aafcbb3d7e935fc7c8
|
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from __future__ import division
from neon import NervanaObject
from neon.util.persist import load_class
import logging
import numpy as np
logger = logging.getLogger(__name__)
def get_param_list(layer_list):
'''
Returns a flattened list of parameters. Each element in the list
is a tuple ``((W, dW), states)`` for the parameters ``W``, parameter updates ``dW``,
and the current set of ``states``.
Args:
layer_list (list): List of layers
Returns:
param_list (list): List of parameters.
'''
plist = []
for l in layer_list:
ptuple = l.get_params()
plist.extend(ptuple) if isinstance(ptuple, list) else plist.append(ptuple)
return plist
class Optimizer(NervanaObject):
'''
The optimizer class handles the gradient update stage of training a neural network.
Given the current parameters :math:`w`, update parameters
:math:`\Delta w`, and current state :math:`s`, the optimizer specifies an
algorithm for performing the update.
    This base class contains two helper functions for scaling the gradients and
    specifies the abstract method optimize, which subclasses should implement.
    The optimize method is called at every minibatch to update the layer parameters.
'''
def __init__(self, name=None):
"""
Class constructor.
"""
super(Optimizer, self).__init__(name=name)
def optimize(self, layer_list, epoch):
"""
Update the parameters for a provided list of layers.
Args:
layer_list (list): List of layers to optimize
epoch (integer): Epoch count of training
"""
raise NotImplementedError()
def clip_gradient_norm(self, param_list, clip_norm):
"""
Returns a scaling factor to apply to the gradients.
The scaling factor is computed such that the root mean squared
average of the scaled gradients across all layers will be less than
or equal to the provided clip_norm value. This factor is always <1, so
never scales up the gradients.
Arguments:
param_list (list): List of layer parameters
clip_norm (float, optional): Target norm for the gradients. If not provided
the returned scale_factor will equal 1.
Returns:
scale_factor (float): Computed scale factor.
"""
scale_factor = 1
if clip_norm:
grad_list = [grad for (param, grad), states in param_list]
grad_square_sums = sum(self.be.sum(self.be.square(grad)) for grad in grad_list)
grad_norm = self.be.zeros((1, 1))
grad_norm[:] = self.be.sqrt(grad_square_sums) / self.be.bsz
scale_factor = clip_norm / max(float(grad_norm.get()), float(clip_norm))
return scale_factor
def clip_gradient_value(self, grad, clip_value=None):
"""
Element-wise clip a list of gradients to between ``-clip_value`` and ``+clip_value``.
Arguments:
grad (list): List of gradients for a single layer
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
Returns:
grad (list): List of clipped gradients.
"""
if clip_value:
return self.be.clip(grad, -abs(clip_value), abs(clip_value))
else:
return grad
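# --- Illustrative sketch, not part of the original library ---
# A plain-numpy analogue of Optimizer.clip_gradient_norm above (batch-size
# scaling omitted): the returned factor shrinks the gradients so that their
# global norm does not exceed clip_norm, and it never scales them up.
def _clip_norm_scale_demo(grad_list, clip_norm):
    grad_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grad_list))
    return clip_norm / max(float(grad_norm), float(clip_norm))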
class Schedule(NervanaObject):
"""
Learning rate schedule.
By default implements a constant learning rate:
.. code-block:: python
# Constant learning rate of 0.01 across training epochs
optimizer = GradientDescentMomentum(0.01, 0.9, schedule = Schedule())
Otherwise, the schedule multiplies the learning rate by change at every element in
``step_config``.
For example,
.. code-block:: python
schedule = Schedule(step_config=[2, 6], change=0.5)
        optimizer = GradientDescentMomentum(1.0, 0.9, schedule=schedule)
will yield a learning rate schedule of:
.. csv-table::
:header: "Epoch", "LR"
:widths: 20, 10
0, 1.0
1, 1.0
2, 0.5
3, 0.5
4, 0.5
5, 0.5
6, 0.25
7, 0.25
8, 0.25
9, 0.25
"""
def __init__(self, step_config=None, change=1.):
"""
Class constructor.
Arguments:
step_config (list, optional): Configure the step times (list of epoch indices).
Defaults to None (constant).
change (int, optional): The learning rate is
multiplied by ``change ** steps``, where ``steps`` is the
number of steps in the step schedule that have passed.
"""
if isinstance(step_config, list) and isinstance(change, list):
            assert len(step_config) == len(change), "change and step_config must have the same " \
"length after step_config is deduplicated to do epoch-level LR assignment."
logger.warn("This functionality will be removed from Schedule in the future. "
"Please use the StepSchedule class instead.")
if isinstance(step_config, int):
logger.warn("This functionality will be removed from Schedule in the future. "
"Please use the PowerSchedule class instead.")
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate
Returns:
(float): The adjusted learning rate
"""
# will be moved to StepSchedule in the future
if isinstance(self.step_config, list) and isinstance(self.change, list):
if epoch in self.step_config:
# steps will store the current lr
self.steps = self.change[self.step_config.index(epoch)]
if self.steps == 0:
return learning_rate
else:
return self.steps
# will be moved to PowerSchedule in the future
elif isinstance(self.step_config, int):
self.steps = np.floor(epoch / self.step_config)
elif isinstance(self.step_config, list):
self.steps = np.sum(epoch >= np.array(self.step_config))
return float(learning_rate * self.change ** self.steps)
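# --- Illustrative sketch, not part of the original library ---
# Reproduces the learning-rate table from the Schedule docstring without a
# neon backend: with step_config=[2, 6] and change=0.5, a base rate of 1.0
# is halved at epoch 2 and halved again at epoch 6.
def _schedule_demo():
    sched = Schedule(step_config=[2, 6], change=0.5)
    # -> [1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.25, 0.25]
    return [sched.get_learning_rate(1.0, epoch) for epoch in range(8)]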
class StepSchedule(Schedule):
"""
Steps the learning rate over training time.
To set a step schedule, pass as arguments ``step_config`` and ``change``. The schedule
will set the learning rate at ``step[i]`` to ``change[i]``. For example, the call:
.. code-block:: python
        schedule = StepSchedule(step_config=[2, 6], change=[0.6, 0.4])
will set the learning rate to 0.6 at step 2, and to 0.4 at step 6.
"""
def __init__(self, step_config, change):
"""
Class constructor.
Arguments:
step_config (list): Configure the step times (list of epoch indices)
change (list): List of learning rates. Must be same length as step_config
"""
assert isinstance(step_config, list) and isinstance(change, list), \
"The arguments change and step_config must be lists."
assert len(step_config) == len(change), \
"The arguments change and step_config must have the same length."
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate
Returns:
(float): The adjusted learning rate
"""
if epoch in self.step_config:
# steps will store the current lr
self.steps = self.change[self.step_config.index(epoch)]
if self.steps == 0:
return learning_rate
else:
return self.steps
class PowerSchedule(Schedule):
"""
Multiplies the learning rate by a factor at regular epoch intervals.
This schedule will multiply the learning rate by
the factor ``change`` every ``step_config`` epochs. For example,
.. code-block:: python
        schedule = PowerSchedule(step_config=2, change=0.5)
optimizer = GradientDescentMomentum(0.1, 0.9, schedule=schedule)
will yield a learning rate schedule of:
.. csv-table::
:header: "Epoch", "LR"
:widths: 20, 10
0, 0.1
1, 0.1
2, 0.05
3, 0.05
4, 0.025
5, 0.025
6, 0.0125
7, 0.0125
"""
def __init__(self, step_config, change):
"""
Class constructor.
Arguments:
step_config (int): Learning rate update interval (in epochs)
change (int): Update factor
"""
assert isinstance(step_config, int), \
"The argument step_config must be an integer."
assert not isinstance(change, list), \
"The argument change must be a float or integer."
self.step_config = step_config
self.change = change
self.steps = 0
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate.
Returns:
(float): The adjusted learning rate.
"""
self.steps = np.floor(epoch / self.step_config)
return float(learning_rate * self.change ** self.steps)
class ExpSchedule(Schedule):
"""
Exponential learning rate schedule. This schedule implements
.. math::
\\alpha(t) = \\frac{\\alpha_\\circ}{1 + \\beta t}
where :math:`\\beta` is the decay rate, and :math:`\\alpha_\\circ` is the
initial learning rate.
"""
def __init__(self, decay):
"""
Class constructor.
Arguments:
decay (float): Decay rate.
"""
self.decay = decay
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate.
Returns:
(float): The adjusted learning rate.
"""
return float(learning_rate / (1. + self.decay * epoch))
class PolySchedule(Schedule):
"""
Polynomial learning rate schedule.
This schedule takes as input the total number of epochs :math:`T` and a power :math:`\\beta`,
and produces the learning schedule:
.. math::
\\alpha(t) = \\alpha_\\circ \\times\\left(1-\\frac{t}{T}\\right)^\\beta
where :math:`\\alpha_\\circ` is the initial learning rate.
"""
def __init__(self, total_epochs, power):
"""
Class constructor.
Arguments:
total_epochs (int): Total number of epochs over which to calculate interpolated decay
power (float): Total decay parameter
"""
self.total_epochs = np.float32(total_epochs)
self.power = power
def get_learning_rate(self, learning_rate, epoch):
"""
Returns the current learning rate given the epoch and initial learning rate.
Arguments:
learning_rate (float): Initial learning rate
epoch (int): Current epoch, used to calculate the adjusted learning rate.
Returns:
(float): The adjusted learning rate.
"""
        return float(learning_rate * (1. - epoch / self.total_epochs) ** self.power)
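# --- Illustrative sketch, not part of the original library ---
# Polynomial decay as documented in the PolySchedule docstring, evaluated
# without a neon backend: with total_epochs=10 and power=2, a base rate of
# 0.1 follows 0.1 * (1 - t/10) ** 2.
def _poly_schedule_demo():
    sched = PolySchedule(total_epochs=10, power=2)
    # epochs 0, 3, 6, 9 -> approximately [0.1, 0.049, 0.016, 0.001]
    return [sched.get_learning_rate(0.1, epoch) for epoch in range(0, 10, 3)]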
class ShiftSchedule(Schedule):
"""
Binary shift learning rate schedule.
Arguments:
interval (int): interval in epochs the learning rate is shifted
shift_size (int): amount to shift
"""
def __init__(self, interval, shift_size=1):
self.interval = interval
self.shift_size = shift_size
def get_learning_rate(self, learning_rate, epoch):
total_shift = -1 * self.shift_size * int(epoch/self.interval)
return float(self.be.shift(learning_rate, total_shift, value=False).get())
class GradientDescentMomentum(Optimizer):
"""
Stochastic gradient descent with momentum.
Given the parameters :math:`\\theta`, the learning rate :math:`\\alpha`,
and the gradients :math:`\\nabla J(\\theta; x)`
computed on the minibatch data :math:`x`, SGD updates the parameters via
.. math::
\\theta' = \\theta - \\alpha\\nabla J(\\theta; x)
Here we implement SGD with momentum. Momentum tracks the history of
gradient updates to help the system move faster through saddle points.
Given the additional parameters: momentum :math:`\gamma`, weight decay :math:`\lambda`,
and current velocity :math:`v`, we use the following update equations
.. math::
v' = \\gamma v - \\alpha(\\nabla J(\\theta; x) + \\lambda\\theta)
        \\theta' = \\theta + v'
Example usage:
.. code-block:: python
from neon.optimizers import GradientDescentMomentum
# use SGD with learning rate 0.01 and momentum 0.9, while
# clipping the gradient magnitude to between -5 and 5.
opt = GradientDescentMomentum(0.01, 0.9, gradient_clip_value = 5)
"""
def __init__(self, learning_rate, momentum_coef, stochastic_round=False,
wdecay=0.0, gradient_clip_norm=None, gradient_clip_value=None,
name=None, schedule=Schedule()):
"""
Class constructor.
Arguments:
learning_rate (float): Multiplicative coefficient of updates
momentum_coef (float): Coefficient of momentum
stochastic_round (bool, optional): Set this to True for stochastic
rounding. If False (default)
rounding will be to nearest. If
True use default width
stochastic rounding. Note that
this only affects the GPU
backend.
wdecay (float, optional): Amount of weight decay. Defaults to 0
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
name (str, optional): the optimizer's layer's pretty-print name.
Defaults to "gdm".
schedule (neon.optimizers.optimizer.Schedule, optional): Learning
rate schedule. Defaults to a constant learning rate.
"""
super(GradientDescentMomentum, self).__init__(name=name)
self.learning_rate, self.momentum_coef = (learning_rate, momentum_coef)
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.wdecay = wdecay
self.schedule = schedule
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0 and self.momentum_coef != 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
if self.momentum_coef == 0:
velocity = - lrate * (scale_factor * grad + self.wdecay * param)
else:
velocity = states[0]
velocity[:] = velocity * self.momentum_coef \
- lrate * (scale_factor * grad + self.wdecay * param)
param[:] = param + velocity
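# --- Illustrative sketch, not part of the original library ---
# A single SGD-with-momentum step in plain numpy, mirroring the velocity
# update performed in GradientDescentMomentum.optimize above (batch-size
# scaling and gradient clipping omitted for brevity).
def _sgd_momentum_step_demo(param, grad, velocity,
                            lrate=0.01, momentum_coef=0.9, wdecay=0.0):
    velocity = momentum_coef * velocity - lrate * (grad + wdecay * param)
    return param + velocity, velocity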
class RMSProp(Optimizer):
"""
Root Mean Square propagation.
Root Mean Square (RMS) propagation protects against vanishing and
exploding gradients. In RMSprop, the gradient is divided by a running
average of recent gradients. Given the parameters :math:`\\theta`, gradient :math:`\\nabla J`,
we keep a running average :math:`\\mu` of the last :math:`1/\\lambda` gradients squared.
The update equations are then given by
.. math::
\\mu' &= \\lambda\\mu + (1-\\lambda)(\\nabla J)^2
.. math::
\\theta' &= \\theta - \\frac{\\alpha}{\\sqrt{\\mu + \\epsilon} + \\epsilon}\\nabla J
    where we use :math:`\\epsilon` as a (small) smoothing factor to prevent division by zero.
"""
def __init__(self, stochastic_round=False, decay_rate=0.95, learning_rate=2e-3, epsilon=1e-6,
gradient_clip_norm=None, gradient_clip_value=None, name=None,
schedule=Schedule()):
"""
Class constructor.
Arguments:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
decay_rate (float): decay rate of states
            learning_rate (float): the multiplication coefficient of updates
epsilon (float): smoothing epsilon to avoid divide by zeros
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
schedule (neon.optimizers.optimizer.Schedule, optional): Learning rate schedule.
Defaults to a constant.
Notes:
Only constant learning rate is supported currently.
"""
super(RMSProp, self).__init__(name=name)
self.state_list = None
self.epsilon = epsilon
self.decay_rate = decay_rate
self.learning_rate = learning_rate
self.schedule = schedule
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
epsilon, decay = (self.epsilon, self.decay_rate)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
# update state
state = states[0]
state[:] = decay * state + self.be.square(grad) * (1.0 - decay)
param[:] = param \
- (scale_factor * grad * lrate) / (self.be.sqrt(state + epsilon) + epsilon)
class Adagrad(Optimizer):
"""
Adagrad optimization algorithm.
Adagrad is an algorithm that adapts the learning rate individually for each parameter
by dividing by the :math:`L_2`-norm of all previous gradients. Given the parameters
:math:`\\theta`, gradient :math:`\\nabla J`, accumulating norm :math:`G`, and smoothing
factor :math:`\\epsilon`, we use the update equations:
.. math::
G' = G + (\\nabla J)^2
.. math::
\\theta' = \\theta - \\frac{\\alpha}{\sqrt{G' + \\epsilon}} \\nabla J
    where the smoothing factor :math:`\\epsilon` prevents division by zero.
By adjusting the learning rate individually for each parameter, Adagrad adapts
to the geometry of the error surface. Differently scaled weights have appropriately scaled
update steps.
Example usage:
.. code-block:: python
from neon.optimizers import Adagrad
# use Adagrad with a learning rate of 0.01
optimizer = Adagrad(learning_rate=0.01, epsilon=1e-6)
"""
def __init__(self, stochastic_round=False, learning_rate=0.01, epsilon=1e-6,
gradient_clip_norm=None, gradient_clip_value=None, name=None):
"""
Class constructor.
Arguments:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
            learning_rate (float): the multiplication coefficient of updates
epsilon (float): smoothing epsilon to avoid divide by zeros
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip
gradients.
Defaults to None.
Notes:
Only constant learning rate is supported currently.
"""
super(Adagrad, self).__init__(name=name)
self.state_list = None
self.epsilon = epsilon
self.learning_rate = learning_rate
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
lrate, epsilon = (self.learning_rate, self.epsilon)
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
states.append(self.be.zeros_like(grad))
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
# update state
state = states[0]
state[:] = state + self.be.square(grad)
param[:] = param - (scale_factor * grad * lrate) / (self.be.sqrt(state + epsilon))
class Adadelta(Optimizer):
"""
Adadelta optimization algorithm.
Similar to RMSprop, Adadelta tracks the running average of the
gradients, :math:`\\mu_J`, over a window size :math:`1/\\lambda`, where
:math:`\\lambda` is the parameter ``decay``. Adadelta also tracks an average of the
recent update steps, which we denote as :math:`\\mu_\\theta`, and sets the learning rate
as the ratio of the two averages:
.. math::
\\mu_J' &= \\lambda\\mu_J + (1-\\lambda) (\\nabla J)^2
.. math::
\\Delta \\theta &= \\sqrt{\\frac{\\mu_\\theta + \\epsilon}{\\mu_J' + \\epsilon}} \\nabla J
.. math::
\\mu_\\theta &= \\lambda \\mu_\\theta + (1-\\rho) (\\Delta \\theta)^2
.. math::
\\theta &= \\theta - \\Delta \\theta
Note that the learning rate is a ratio of the average updates from the
previous step, :math:`\\mu_\\theta`, divided by the average gradients including the current
step, :math:`\\mu'_J`.
Example usage:
.. code-block:: python
from neon.optimizers import Adadelta
        # use Adadelta with a decay rate of 0.95
optimizer = Adadelta(decay=0.95, epsilon=1e-6)
"""
def __init__(self, stochastic_round=False, decay=0.95, epsilon=1e-6, name=None):
"""
Class constructor.
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
decay: decay parameter in Adadelta
epsilon: epsilon parameter in Adadelta
"""
super(Adadelta, self).__init__(name=name)
self.decay = decay
self.epsilon = epsilon
self.stochastic_round = stochastic_round
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
            layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
epsilon, decay = (self.epsilon, self.decay)
param_list = get_param_list(layer_list)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# E[Grad^2], E[Delt^2], updates
states.extend([self.be.zeros_like(grad) for i in range(3)])
grad = grad / self.be.bsz
states[0][:] = states[0] * decay + (1. - decay) * grad * grad
states[2][:] = self.be.sqrt((states[1] + epsilon) / (states[0] + epsilon)) * grad
states[1][:] = states[1] * decay + (1. - decay) * states[2] * states[2]
param[:] = param - states[2]
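# Illustration only (not part of neon): a minimal NumPy sketch of a single
# Adadelta update, mirroring the equations in the docstring above. The function
# name and argument names are hypothetical and exist purely to make the two
# running averages explicit.
def _adadelta_step_sketch(param, grad, grad_avg, delta_avg, decay=0.95, eps=1e-6):
    import numpy as np  # local import keeps the sketch self-contained
    grad_avg = decay * grad_avg + (1. - decay) * grad * grad        # mu_J'
    step = np.sqrt((delta_avg + eps) / (grad_avg + eps)) * grad     # Delta theta
    delta_avg = decay * delta_avg + (1. - decay) * step * step      # mu_theta
    return param - step, grad_avg, delta_avg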
class Adam(Optimizer):
"""
Adam optimizer.
The Adam optimizer combines features from RMSprop and Adagrad. We
accumulate both the first and second moments of the gradient with decay
    rates :math:`\\beta_1` and :math:`\\beta_2`, corresponding to effective averaging
    windows of roughly :math:`1/(1-\\beta_1)` and :math:`1/(1-\\beta_2)`, respectively.
.. math::
m' &= \\beta_1 m + (1-\\beta_1) \\nabla J
.. math::
v' &= \\beta_2 v + (1-\\beta_2) (\\nabla J)^2
We update the parameters by the ratio of the two moments:
.. math::
\\theta = \\theta - \\alpha \\frac{\\hat{m}'}{\\sqrt{\\hat{v}'}+\\epsilon}
where we compute the bias-corrected moments :math:`\\hat{m}'` and :math:`\\hat{v}'` via
.. math::
\\hat{m}' &= m'/(1-\\beta_1^t)
.. math::
        \\hat{v}' &= v'/(1-\\beta_2^t)
Example usage:
.. code-block:: python
from neon.optimizers import Adam
# use Adam
optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
"""
def __init__(self, stochastic_round=False, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, gradient_clip_norm=None, gradient_clip_value=None, name="adam"):
"""
Class constructor.
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplicative coefficient of updates
beta_1 (float): Adam parameter beta1
beta_2 (float): Adam parameter beta2
epsilon (float): numerical stability parameter
gradient_clip_norm (float, optional): Target gradient norm.
Defaults to None.
gradient_clip_value (float, optional): Value to element-wise clip gradients.
Defaults to None.
"""
super(Adam, self).__init__(name=name)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.learning_rate = learning_rate
self.stochastic_round = stochastic_round
self.gradient_clip_norm = gradient_clip_norm
self.gradient_clip_value = gradient_clip_value
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
            layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
t = epoch + 1
l = self.learning_rate * self.be.sqrt(1 - self.beta_2 ** t) / (1 - self.beta_1 ** t)
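        # The bias corrections m/(1 - beta_1^t) and v/(1 - beta_2^t) from the
        # docstring are folded into the scalar step size `l` above; this is the
        # usual efficient Adam formulation and differs only in how epsilon is
        # scaled in the denominator.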
param_list = get_param_list(layer_list)
scale_factor = self.clip_gradient_norm(param_list, self.gradient_clip_norm)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
# running_1st_mom, running_2nd_mom
states.extend([self.be.zeros_like(grad) for i in range(2)])
grad = grad / self.be.bsz
grad = self.clip_gradient_value(grad, self.gradient_clip_value)
m, v = states
m[:] = m * self.beta_1 + (1. - self.beta_1) * grad
v[:] = v * self.beta_2 + (1. - self.beta_2) * grad * grad
param[:] = param - (scale_factor * l * m) / (self.be.sqrt(v) + self.epsilon)
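# Illustration only (not part of neon): a minimal NumPy sketch of a single Adam
# update with the bias correction written out explicitly, matching the docstring
# equations above. Names are hypothetical.
def _adam_step_sketch(param, grad, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1e-8):
    import numpy as np  # local import keeps the sketch self-contained
    m = beta_1 * m + (1. - beta_1) * grad          # first moment
    v = beta_2 * v + (1. - beta_2) * grad * grad   # second moment
    m_hat = m / (1. - beta_1 ** t)                 # bias-corrected moments (t >= 1)
    v_hat = v / (1. - beta_2 ** t)
    return param - lr * m_hat / (np.sqrt(v_hat) + eps), m, v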
class ShiftAdaMax(Optimizer):
"""
Shift based AdaMax. http://arxiv.org/pdf/1602.02830v3.pdf
"""
def __init__(self, stochastic_round=False, learning_rate=0.002, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, schedule=Schedule(), name="ShiftAdaMax"):
"""
Args:
stochastic_round (bool): Set this to True for stochastic rounding.
If False rounding will be to nearest.
If True will perform stochastic rounding using default width.
Only affects the gpu backend.
learning_rate (float): the multiplicative coefficient of updates
beta_1 (float): Adam parameter beta1
beta_2 (float): Adam parameter beta2
epsilon (float): numerical stability parameter
schedule (neon.optimizers.optimizer.Schedule, optional): Learning rate schedule.
Defaults to a constant.
"""
super(ShiftAdaMax, self).__init__(name=name)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.learning_rate = learning_rate
self.stochastic_round = stochastic_round
self.schedule = schedule
def optimize(self, layer_list, epoch):
"""
Apply the learning rule to all the layers and update the states.
Arguments:
            layer_list (list): a list of Layer objects to optimize.
epoch (int): the current epoch, needed for the Schedule object.
"""
t = epoch + 1
lrate = self.schedule.get_learning_rate(self.learning_rate, epoch)
l = lrate / (1 - self.beta_1 ** t)
param_list = get_param_list(layer_list)
for (param, grad), states in param_list:
param.rounding = self.stochastic_round
if len(states) == 0:
                # running_1st_mom, running_2nd_mom, 1 / (running_2nd_mom + epsilon)
states.extend([self.be.zeros_like(grad) for i in range(3)])
grad = grad / self.be.bsz
m, v, inv_v = states
m[:] = m * self.beta_1 + (1. - self.beta_1) * grad
v[:] = self.be.maximum(v * self.beta_2, self.be.absolute(grad))
inv_v[:] = 1.0 / (v + self.epsilon)
param[:] = param - self.be.shift(self.be.shift(m, inv_v), l)
self.be.clip(param, -1, 1, param)
class MultiOptimizer(Optimizer):
"""
A wrapper class for using multiple Optimizers within the same model.
To assign different optimizers to different layers we first define
the different optimizers:
.. code-block:: python
from neon.optimizers import GradientDescentMomentum, RMSprop
optimizer_A = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
optimizer_B = GradientDescentMomentum(learning_rate=0.05, momentum_coef=0.9)
optimizer_C = RMSprop(learning_rate=2e-3, decay_rate=0.95)
Then, we instantiate this class and pass a
dictionary mapping layers to optimizers. The keys can either be:
``default``, a layer class name (e.g. ``Bias``), or the Layer's name
attribute. The latter takes precedence for finer layer-to-layer control.
For example, if we have the following layers,
.. code-block:: python
layers = []
layers.append(Linear(nout = 100, init=Gaussian(), name="layer_one"))
layers.append(Linear(nout = 50, init=Gaussian(), name="layer_two"))
layers.append(Affine(nout = 5, init=Gaussian(), activation=Softmax()))
we can define multiple optimizers with
.. code-block:: python
from neon.optimizers import MultiOptimizer
# dictionary of mappings
mapping = {'default': optimizer_A, # default optimizer
'Linear': optimizer_B, # all layers from the Linear class
'layer_two': optimizer_C} # this overrides the previous entry
# use multiple optimizers
opt = MultiOptimizer(mapping)
After definition, we have the following mapping
+----------------------+----------------------------+
| Layer | Optimizer |
+======================+============================+
| ``layer_one`` | ``optimizer_B`` |
+----------------------+----------------------------+
| ``layer_two`` | ``optimizer_C`` |
+----------------------+----------------------------+
| ``Affine.Linear`` | ``optimizer_B`` |
+----------------------+----------------------------+
| ``Affine.Bias`` | ``optimizer_A`` |
+----------------------+----------------------------+
| ``Affine.Softmax`` | ``None (no parameters)`` |
+----------------------+----------------------------+
"""
def __init__(self, optimizer_mapping, name=None):
"""
Class constructor.
Args:
optimizer_mapping (dict): dictionary specifying the mapping of layers to optimizers.
Key: ``'default'``, layer class name or layer `name` attribute.
Don't name your layers ``'default'``. Value: the optimizer object to
use for those layers.
"""
super(MultiOptimizer, self).__init__(name=name)
self.optimizer_mapping = optimizer_mapping
        assert 'default' in self.optimizer_mapping, "Must specify a default " \
            "optimizer in the layer type to optimizer mapping"
self.map_list = None
@classmethod
def gen_class(cls, pdict):
for key in pdict['optimizer_mapping']:
# these should be optimizers
typ = pdict['optimizer_mapping'][key]['type']
ocls = load_class(typ)
if 'config' not in pdict['optimizer_mapping'][key]:
pdict['optimizer_mapping'][key]['config'] = {}
conf = pdict['optimizer_mapping'][key]['config']
pdict['optimizer_mapping'][key] = ocls.gen_class(conf)
return cls(**pdict)
def get_description(self):
desc = {'type': self.modulenm}
desc['config'] = {'optimizer_mapping': {}}
for key in self.optimizer_mapping:
opt_desc = self.optimizer_mapping[key].get_description()
desc['config']['optimizer_mapping'][key] = opt_desc
return desc
def _map_optimizers(self, layer_list):
"""
maps the optimizers to their corresponding layers
"""
map_list = dict()
for layer in layer_list:
classname = layer.__class__.__name__
name = layer.name
opt = None
if name in self.optimizer_mapping:
opt = self.optimizer_mapping[name]
elif classname in self.optimizer_mapping:
opt = self.optimizer_mapping[classname]
else:
opt = self.optimizer_mapping['default']
if opt not in map_list:
map_list[opt] = [layer]
else:
map_list[opt].append(layer)
return map_list
def _reset_mapping(self, new_mapping):
"""
Pass this optimizer a new mapping, and on subsequent optimize call, the
mapping will be refreshed (since map_list will be recreated)
"""
self.optimizer_mapping = new_mapping
self.map_list = None
def optimize(self, layer_list, epoch):
"""
Determine which optimizer in the container should go with which layers,
then apply their optimize functions to those layers.
Notes:
We can recalculate ``map_list`` in case ``optimizer_mapping`` changes
during training.
"""
if self.map_list is None:
self.map_list = self._map_optimizers(layer_list)
for opt in self.map_list:
opt.optimize(self.map_list[opt], epoch)
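# Illustration only (not part of neon): a minimal sketch of the lookup order
# implemented in MultiOptimizer._map_optimizers above. A layer's `name`
# attribute wins over its class name, which wins over the 'default' entry.
# The helper name below is hypothetical.
def _resolve_optimizer_sketch(layer, optimizer_mapping):
    if layer.name in optimizer_mapping:
        return optimizer_mapping[layer.name]
    if layer.__class__.__name__ in optimizer_mapping:
        return optimizer_mapping[layer.__class__.__name__]
    return optimizer_mapping['default']
# For the docstring example, a Linear layer named 'layer_two' resolves to
# optimizer_C, any other Linear layer to optimizer_B, and a Bias layer to
# optimizer_A (the default).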
|
matthijsvk/multimodalSR
|
code/Experiments/neon-master/neon/optimizers/optimizer.py
|
Python
|
mit
| 40,063
|
[
"Gaussian"
] |
1633e9a7cad0e80811427adbaf6876f2bad6116243665b3c170dfedf772c26e4
|
# Compare Algorithms
import pandas
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import label_binarize
import os
import sys
config_path = "utilities/"
sys.path.append(os.path.abspath(config_path))
from MyAPI import MyAPI
api = MyAPI()
X, Y = api.get_dataset(0, start_index=0,end_index=20000, nr=20000)
# prepare models
models = []
models.append(('KNN', KNeighborsClassifier()))
models.append(('Decision Tree', DecisionTreeClassifier()))
models.append(('Gaussian', GaussianNB()))
models.append(('SVM', SVC()))
classes=list(set(Y))
# prepare configuration for cross validation test harness
seed = 7
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in models:
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.savefig('compare.png')
|
alod83/osiris
|
plot/compare_algorithms.py
|
Python
|
mit
| 1,613
|
[
"Gaussian"
] |
1ba223496fed266a6f91dabe42dc1da718cc5044f9a0a3fe6a3d9a026786fc9d
|
import ocl
import camvtk
import time
import vtk
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
print "STL surface read"
myscreen.addActor(stl)
stl.SetWireframe()
polydata = stl.src.GetOutput()
s= ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STLSurf with ", s.size(), " triangles"
# define a cutter
cutter = ocl.CylCutter(0.6, 5)
print cutter
print "creating PathDropCutter()"
pdc = ocl.PathDropCutter() # create a pdc
print "set STL surface"
pdc.setSTL(s)
print "set cutter"
pdc.setCutter(cutter) # set the cutter
print "set minimumZ"
pdc.minimumZ = -1 # set the minimum Z-coordinate, or "floor" for drop-cutter
print "set the sampling interval"
pdc.setSampling(0.0123)
# some parameters for this "zigzig" pattern
ymin=0
ymax=12
Ny=40 # number of lines in the y-direction
dy = float(ymax-ymin)/Ny # the y step-over
path = ocl.Path() # create an empty path object
# add Line objects to the path in this loop
for n in xrange(0,Ny):
y = ymin+n*dy
p1 = ocl.Point(0,y,0) # start-point of line
p2 = ocl.Point(9,y,0) # end-point of line
l = ocl.Line(p1,p2) # line-object
path.append( l ) # add the line to the path
print " set the path for pdf "
pdc.setPath( path )
print " run the calculation "
t_before = time.time()
pdc.run() # run drop-cutter on the path
t_after = time.time()
print "run took ", t_after-t_before," s"
print "get the results "
    clp = pdc.getCLPoints() # get the cl-points from pdc
print " render the CL-points"
camvtk.drawCLPointCloud(myscreen, clp)
#myscreen.addActor( camvtk.PointCloud(pointlist=clp, collist=ccp) )
myscreen.camera.SetPosition(3, 23, 15)
myscreen.camera.SetFocalPoint(5, 5, 0)
myscreen.render()
print " All done."
myscreen.iren.Start()
|
JohnyEngine/CNC
|
opencamlib/scripts/pathdropcutter_test_1.py
|
Python
|
apache-2.0
| 2,141
|
[
"VTK"
] |
c7e124db64cc3bb5302c5a0f7fd789e99aab4c328de83d15cd684ef90e1020f6
|
import unittest
from itertools import combinations, permutations
from phevaluator.hash import hash_quinary
from phevaluator.tables import NO_FLUSH_5
class TestNoFlush5Table(unittest.TestCase):
TABLE = [0] * len(NO_FLUSH_5)
VISIT = [0] * len(NO_FLUSH_5)
CUR_RANK = 1
NUM_CARDS = 5
@classmethod
def setUpClass(cls):
cls.mark_straight_flush()
cls.mark_four_of_a_kind()
cls.mark_full_house()
cls.mark_flush()
cls.mark_straight()
cls.mark_three_of_a_kind()
cls.mark_two_pair()
cls.mark_one_pair()
cls.mark_high_card()
@staticmethod
def quinaries(n):
return permutations(range(13)[::-1], n)
@staticmethod
def quinaries_without_duplication():
return combinations(range(13)[::-1], 5)
@classmethod
def mark_four_of_a_kind(cls):
        # Iterate over ordered pairs of distinct ranks (13P2 = 156), highest first
for base in cls.quinaries(2):
hand = [0] * 13
hand[base[0]] = 4
hand[base[1]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_full_house(cls):
for base in cls.quinaries(2):
hand = [0] * 13
hand[base[0]] = 3
hand[base[1]] = 2
hash_ = hash_quinary(hand, cls.NUM_CARDS)
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_straight(cls):
for lowest in range(9)[::-1]: # From 10 to 2
hand = [0] * 13
for i in range(lowest, lowest + 5):
hand[i] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
        # Five-high straight (the wheel: A-2-3-4-5)
base = [12, 3, 2, 1, 0]
hand = [0] * 13
for pos in base:
hand[pos] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_three_of_a_kind(cls):
for base in cls.quinaries(3):
hand = [0] * 13
hand[base[0]] = 3
hand[base[1]] = 1
hand[base[2]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_two_pair(cls):
for base in cls.quinaries(3):
hand = [0] * 13
hand[base[0]] = 2
hand[base[1]] = 2
hand[base[2]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_one_pair(cls):
for base in cls.quinaries(4):
hand = [0] * 13
hand[base[0]] = 2
hand[base[1]] = 1
hand[base[2]] = 1
hand[base[3]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_high_card(cls):
for base in cls.quinaries_without_duplication():
hand = [0] * 13
hand[base[0]] = 1
hand[base[1]] = 1
hand[base[2]] = 1
hand[base[3]] = 1
hand[base[4]] = 1
hash_ = hash_quinary(hand, cls.NUM_CARDS)
if cls.VISIT[hash_] == 0:
cls.TABLE[hash_] = cls.CUR_RANK
cls.VISIT[hash_] = 1
cls.CUR_RANK += 1
@classmethod
def mark_straight_flush(cls):
        # Straight flushes (ace-high down to five-high): 10 ranks
cls.CUR_RANK += 10
@classmethod
def mark_flush(cls):
# Selecting 5 cards in 13: 13C5
        # Need to exclude the 10 straight flushes: -10
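        # i.e. comb(13, 5) - 10 = 1287 - 10 = 1277 distinct flush ranks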
cls.CUR_RANK += int(13 * 12 * 11 * 10 * 9 / (5 * 4 * 3 * 2)) - 10
def test_noflush5_table(self):
self.assertListEqual(self.TABLE, NO_FLUSH_5)
if __name__ == "__main__":
unittest.main()
|
HenryRLee/PokerHandEvaluator
|
python/tests/table_tests/test_hashtable5.py
|
Python
|
apache-2.0
| 4,398
|
[
"VisIt"
] |
7966de771333a029c9ccab2e15b565f43e31ad994c58534ad1ef442cc6863113
|
'''
ray optics
'''
from numpy import pi
from ocelot.optics.elements import *
import numpy as np
intersection_tol = 1.e-6
class Ray(object):
def __init__(self,r0=[0,0,0], k=[0,0,1], lamb = 2.0):
self.r0 = [np.array(r0)]
self.k = [np.array(k)]
self.lamb = lamb
self.s = [1]
self.c = 3.e8
self.obj = [OptDrift()]
@property
def w(self):
"""I'm the 'x' property."""
print("getter of w called")
return (2.*pi * self.c) / self.lamb
@w.setter
def w(self, value):
print("setter of w called" )
        self.lamb = (2. * pi * self.c) / value
def find_intersections(ray, geo):
"""
find the first intersection point of a ray with geometry
"""
s = np.inf
obj = None
r_loc = np.array([0,0])
no = None
for o in geo():
debug('checking intersection:', o.id, o.r, o.no)
nk = np.dot(o.no, ray.k[-1])
nr = np.dot(o.no, o.r - ray.r0[-1])
#print nr, nk
if nr*nk > 0:
#TODO: check that intersection is on aperture
s_int= nr/nk #nr/nk is path length to intersection along the ray
if s_int < s and s_int > intersection_tol:
no = o.no
r_int = ray.r0[-1] + s_int * ray.k[-1]
debug('r_int=', r_int)
# check intersection with elliptic 'aperture'
r_loc = r_int - o.r
debug('r_loc unrotated=', r_loc)
phi = np.arccos(o.no[2]/ np.linalg.norm(o.no))
                r1, r2 = r_loc[1], r_loc[2]
                r_loc[1] = r1 * cos(phi) + r2 * sin(phi)
                r_loc[2] = r2 * cos(phi) - r1 * sin(phi)
debug('r_loc=', r_loc, 'size=',o.size)
# correct intersection for curved elements
if o.__class__ == EllipticMirror:
# note that a[0] is the major axis
#r_loc[0] = r_loc[0] * cos(o.roll) + r_loc[1] * sin(o.roll)
#r_loc[1] = r_loc[1] * cos(o.roll) - r_loc[0] * sin(o.roll)
debug('r_loc=', r_loc, 'size=',o.size)
kz = ray.k[-1][2]
ky = ray.k[-1][1]
rz = r_int[2]
ry = r_int[1] - o.a[1]
az = o.a[0]
ay = o.a[1]
#debug('angle=', np.arctan(ry/rz) / pi)
a_ = kz**2/ az**2 + ky**2/ ay**2
b_ = -2*(kz*rz / az**2 + ky*ry / ay**2)
c_ = rz **2/ az**2 + ry**2/ ay**2 - 1.
d_ = b_**2 - 4*a_*c_
s1 = (- b_ + np.sqrt(d_) ) / (2.*a_)
s2 = (- b_ - np.sqrt(d_) ) / (2.*a_)
s_cor = np.min([s1,s2])
#debug('D=', d_, 's12=',s1,s2, s_cor)
#debug( (rz - s_cor*kz)**2 / az**2 + (ry - s_cor*ky)**2 / ay**2 )
#debug( (rz )**2 / az**2 + (ry )**2 / ay**2 )
debug('s_old=', s_int)
s_int = s_int - s_cor
debug('s_new=', s_int)
r_int = r_int - s_cor * ray.k[-1]
r_loc = r_int - o.r
#r_loc[1] = r_loc[1] * cos(phi)
#r_loc[2] = r_loc[2] * sin(phi)
debug('r_loc_new=', r_int, r_loc)
ang = arctan2(1./az*r_loc[2], 1./ay*(-r_loc[1] + ay))
#debug(r_loc[2], r_loc[1] - ay)
debug('ellipse angle=', ang)
debug('local coord:', az*sin(ang), -ay*cos(ang) + ay)
no = np.array([0, cos(ang),-ay/az*sin(ang)]) / np.sqrt(ay**2/az**2*sin(ang)**2 + cos(ang)**2 )
debug('no=',no)
debug(o.no)
if (r_loc[0]/o.size[0])**2 + (r_loc[1]/o.size[1])**2 <= 1:
s = s_int
obj = o
debug('fits aperture')
else:
debug('fits aperture not')
return s, obj, r_loc, no
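# Illustration only (not part of ocelot): a minimal NumPy sketch of the
# plane-intersection formula used in find_intersections() above. The signed
# path length along the ray is s = n.(r_plane - r0) / (n.k) and the hit point
# is r0 + s*k. It omits the orientation and aperture checks done above; the
# function name is hypothetical.
def _ray_plane_intersection_sketch(r0, k, r_plane, n):
    import numpy as np
    r0, k, r_plane, n = map(np.asarray, (r0, k, r_plane, n))
    nk = np.dot(n, k)
    if abs(nk) < intersection_tol:  # ray (nearly) parallel to the plane
        return None, None
    s = np.dot(n, r_plane - r0) / nk
    return s, r0 + s * k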
def refl_matrix(no):
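    # Householder-style reflection about the plane with unit normal `no`
    # (assuming `no` is normalized): M = 2*n*n^T - I, so that M.dot(-k)
    # equals k - 2*(k.n)*n, i.e. the reflected direction used in trace().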
x, y, z = no
    M = np.matrix([[-1. + 2.*x**2, 2.*x*y, 2.*x*z],
                   [2.*y*x, -1. + 2.*y**2, 2.*y*z],
                   [2.*z*x, 2.*z*y, -1. + 2.*z**2]])
return M
def trace(ray, geo):
"""
tracing the ray, starting from last segment
"""
n_reflect = 0
n_reflect_max = 4
while n_reflect < n_reflect_max:
debug('ray at: ', ray.r0[-1])
# ray length to intersection
s, obj, r_loc, no = find_intersections(ray, geo)
if s == np.inf:
info('ray leaves geometry, terminating')
break
debug('intersection: s=', s, 'obj:', obj.id, 'normal', no)
#propagate to boundary
ray.s[-1] = s
r0_new = ray.r0[-1] + ray.k[-1] * ray.s[-1]
# reflect
if obj.__class__ == Mirror:
debug('reflecting off', obj.id)
k_new = np.asarray(np.dot( refl_matrix(obj.no), -ray.k[-1]))[0]
debug(ray.k[-1], '--->', k_new)
s_new = 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
n_reflect += 1
elif obj.__class__ == EllipticMirror:
debug('reflecting off', obj.id)
debug('no=',no,'k=',ray.k[-1])
'''
cs = np.dot(no, ray.k[-1]) / ( np.linalg.norm(no) * np.linalg.norm(ray.k[-1]) )
debug('cos=',cs)
if np.abs(cs) > 1:
print 'warning, reflection angle adjustment by ', cs + 1.0
if cs > 1: cs = 1.0
else: cs = -1.0
phi = np.arccos( cs )
debug('ray/normal angle', phi / pi ,'pi')
sgn = np.dot([1,0,0],np.cross(no, ray.k[-1]))
if np.linalg.norm(sgn) < 1.e-9:
sgn = sgn / np.linalg.norm(sgn)
else:
sgn = 1.0
debug('sgn=',sgn)
phi = (2*phi - pi) * sgn
debug('e:rotating by:', phi / pi, 'pi')
M = np.matrix([[1, 0, 0],
[0, cos(phi), sin(phi)],
[0, -sin(phi), cos(phi)]])
k_new = np.asarray(np.dot(M, ray.k[-1]))[0]
'''
k_new = np.asarray(np.dot( refl_matrix(no), -ray.k[-1]))[0]
debug(ray.k[-1], '--->', k_new)
s_new = 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
n_reflect += 1
elif obj.__class__ == Grating:
debug('reflecting off', obj.id)
debug(np.dot(obj.no, ray.k[-1]) / ( np.linalg.norm(obj.no) * np.linalg.norm(ray.k[-1]) ))
phi = np.arccos( np.dot(obj.no, ray.k[-1]) / ( np.linalg.norm(obj.no) * np.linalg.norm(ray.k[-1]) ) )
debug('ray/normal angle', phi / pi ,'pi')
sgn = np.dot([1,0,0],np.cross(obj.no, ray.k[-1]))
phi = (2*phi - pi) * sgn * (1+ 0.1 * ray.lamb)
debug('rotating by:', phi / pi, 'pi')
M = np.matrix([[1, 0, 0],
[0, cos(phi), sin(phi)],
[0, -sin(phi), cos(phi)]])
k_new = np.asarray(np.dot(M, ray.k[-1]))[0]
#print '###',ray.k[-1].shape, '###',obj.no
#k_new = rotate_pi(ray.k[-1], obj.no )
#k_new -
debug('k_new--->',k_new)
s_new = 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
n_reflect += 1
elif obj.__class__ == Aperture:
if (r_loc[0] / obj.d[0])**2 + (r_loc[1] / obj.d[1])**2 > 1:
debug('ray stopped at aperture')
break
else:
r0_new = r0_new + ray.k[-1]*intersection_tol * 2
k_new = ray.k[-1]
s_new = ray.s[-1]
n_reflect += 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
elif obj.__class__ == Crystal:
r0_new = r0_new + ray.k[-1]*intersection_tol * 2
k_new = ray.k[-1]
s_new = ray.s[-1]
n_reflect += 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
elif obj.__class__ == Lense:
debug('tracing thru lense, f=',obj.f, ' [m]')
r0_new = r0_new + ray.k[-1]*intersection_tol * 2
k_new = np.array([ray.k[-1][0]- r_loc[0]*ray.k[-1][2] / obj.f, ray.k[-1][1] - r_loc[1]*ray.k[-1][2] / obj.f, ray.k[-1][2] ])
s_new = ray.s[-1]
n_reflect += 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
elif obj.__class__ == Detector:
debug('detector hit')
obj.hit(r_loc)
k_new = ray.k[-1]
s_new = ray.s[-1]
n_reflect += 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
else:
warn('no propagator available, optics element:', obj)
r0_new = r0_new + ray.k[-1]*intersection_tol * 2
k_new = ray.k[-1]
s_new = ray.s[-1]
n_reflect += 1
ray.r0.append(r0_new)
ray.k.append(k_new)
ray.s.append(s_new)
ray.obj.append(obj)
|
ocelot-collab/ocelot
|
ocelot/optics/ray.py
|
Python
|
gpl-3.0
| 10,620
|
[
"CRYSTAL"
] |
97ffb4ae71b95abaeed7c35b597c994119f316c02a266c977d4271d20cc80739
|
# Copyright 2006, 2007, 2008, 2009 Brailcom, o.p.s.
#
# Author: Tomas Cerha <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
# # [[[TODO: richb - Pylint is giving us a bunch of warnings along these
# lines throughout this file:
#
# W0142:202:SpeechServer._send_command: Used * or ** magic
#
# So for now, we just disable these warnings in this module.]]]
#
# pylint: disable-msg=W0142
"""Provides an Orca speech server for Speech Dispatcher backend."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__author__ = "Tomas Cerha <[email protected]>"
__copyright__ = "Copyright (c) 2006-2008 Brailcom, o.p.s."
__license__ = "LGPL"
from gi.repository import GLib
import re
import time
from . import chnames
from . import debug
from . import guilabels
from . import messages
from . import speechserver
from . import settings
from . import orca_state
from . import punctuation_settings
from .acss import ACSS
try:
import speechd
except:
_speechd_available = False
else:
_speechd_available = True
try:
getattr(speechd, "CallbackType")
except AttributeError:
_speechd_version_ok = False
else:
_speechd_version_ok = True
PUNCTUATION = re.compile('[^\w\s]', re.UNICODE)
ELLIPSIS = re.compile('(\342\200\246|\.\.\.\s*)')
class SpeechServer(speechserver.SpeechServer):
# See the parent class for documentation.
_active_servers = {}
DEFAULT_SERVER_ID = 'default'
_SERVER_NAMES = {DEFAULT_SERVER_ID: guilabels.DEFAULT_SYNTHESIZER}
def getFactoryName():
return guilabels.SPEECH_DISPATCHER
getFactoryName = staticmethod(getFactoryName)
def getSpeechServers():
servers = []
default = SpeechServer._getSpeechServer(SpeechServer.DEFAULT_SERVER_ID)
if default is not None:
servers.append(default)
for module in default.list_output_modules():
servers.append(SpeechServer._getSpeechServer(module))
return servers
getSpeechServers = staticmethod(getSpeechServers)
def _getSpeechServer(cls, serverId):
"""Return an active server for given id.
Attempt to create the server if it doesn't exist yet. Returns None
when it is not possible to create the server.
"""
if serverId not in cls._active_servers:
cls(serverId)
        # Don't return the instance unless it was successfully added
        # to `_active_servers'.
return cls._active_servers.get(serverId)
_getSpeechServer = classmethod(_getSpeechServer)
def getSpeechServer(info=None):
if info is not None:
thisId = info[1]
else:
thisId = SpeechServer.DEFAULT_SERVER_ID
return SpeechServer._getSpeechServer(thisId)
getSpeechServer = staticmethod(getSpeechServer)
def shutdownActiveServers():
for server in list(SpeechServer._active_servers.values()):
server.shutdown()
shutdownActiveServers = staticmethod(shutdownActiveServers)
# *** Instance methods ***
def __init__(self, serverId):
super(SpeechServer, self).__init__()
self._id = serverId
self._client = None
self._current_voice_properties = {}
self._acss_manipulators = (
(ACSS.RATE, self._set_rate),
(ACSS.AVERAGE_PITCH, self._set_pitch),
(ACSS.GAIN, self._set_volume),
(ACSS.FAMILY, self._set_family),
)
if not _speechd_available:
debug.println(debug.LEVEL_WARNING,
"Speech Dispatcher interface not installed.")
return
if not _speechd_version_ok:
debug.println(debug.LEVEL_WARNING,
"Speech Dispatcher version 0.6.2 or later is required.")
return
# The following constants must be initialized in runtime since they
# depend on the speechd module being available.
self._PUNCTUATION_MODE_MAP = {
settings.PUNCTUATION_STYLE_ALL: speechd.PunctuationMode.ALL,
settings.PUNCTUATION_STYLE_MOST: speechd.PunctuationMode.SOME,
settings.PUNCTUATION_STYLE_SOME: speechd.PunctuationMode.SOME,
settings.PUNCTUATION_STYLE_NONE: speechd.PunctuationMode.NONE,
}
self._CALLBACK_TYPE_MAP = {
speechd.CallbackType.BEGIN: speechserver.SayAllContext.PROGRESS,
speechd.CallbackType.CANCEL: speechserver.SayAllContext.INTERRUPTED,
speechd.CallbackType.END: speechserver.SayAllContext.COMPLETED,
#speechd.CallbackType.INDEX_MARK:speechserver.SayAllContext.PROGRESS,
}
self._default_voice_name = guilabels.SPEECH_DEFAULT_VOICE % serverId
try:
self._init()
except:
debug.println(debug.LEVEL_WARNING,
"Speech Dispatcher service failed to connect:")
debug.printException(debug.LEVEL_WARNING)
else:
SpeechServer._active_servers[serverId] = self
self._lastKeyEchoTime = None
def _init(self):
self._client = client = speechd.SSIPClient('Orca', component=self._id)
client.set_priority(speechd.Priority.MESSAGE)
if self._id != self.DEFAULT_SERVER_ID:
client.set_output_module(self._id)
self._current_voice_properties = {}
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
client.set_punctuation(mode)
def updateCapitalizationStyle(self):
"""Updates the capitalization style used by the speech server."""
self._client.set_cap_let_recogn(settings.capitalizationStyle)
def updatePunctuationLevel(self):
""" Punctuation level changed, inform this speechServer. """
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
self._client.set_punctuation(mode)
def _send_command(self, command, *args, **kwargs):
if hasattr(speechd, 'SSIPCommunicationError'):
try:
return command(*args, **kwargs)
except speechd.SSIPCommunicationError:
debug.println(debug.LEVEL_CONFIGURATION,
"Speech Dispatcher connection lost. "
"Trying to reconnect.")
self.reset()
return command(*args, **kwargs)
except:
pass
else:
            # It is not possible to catch the error with older SD versions.
return command(*args, **kwargs)
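    # The three setters below rescale ACSS values (rate 0-99, pitch/volume 0-9)
    # onto sub-ranges of Speech Dispatcher's -100..100 parameter scale; the
    # exact affine constants are inherited from the original implementation.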
def _set_rate(self, acss_rate):
rate = int(2 * max(0, min(99, acss_rate)) - 98)
self._send_command(self._client.set_rate, rate)
def _set_pitch(self, acss_pitch):
pitch = int(20 * max(0, min(9, acss_pitch)) - 90)
self._send_command(self._client.set_pitch, pitch)
def _set_volume(self, acss_volume):
volume = int(15 * max(0, min(9, acss_volume)) - 35)
self._send_command(self._client.set_volume, volume)
def _set_family(self, acss_family):
familyLocale = acss_family.get(speechserver.VoiceFamily.LOCALE)
if not familyLocale:
import locale
familyLocale, encoding = locale.getdefaultlocale()
if familyLocale:
lang = familyLocale.split('_')[0]
if lang and len(lang) == 2:
self._send_command(self._client.set_language, str(lang))
try:
# This command is not available with older SD versions.
set_synthesis_voice = self._client.set_synthesis_voice
except AttributeError:
pass
else:
name = acss_family.get(speechserver.VoiceFamily.NAME)
if name != self._default_voice_name:
self._send_command(set_synthesis_voice, name)
def _apply_acss(self, acss):
if acss is None:
acss = settings.voices[settings.DEFAULT_VOICE]
current = self._current_voice_properties
for acss_property, method in self._acss_manipulators:
value = acss.get(acss_property)
if value is not None:
if current.get(acss_property) != value:
method(value)
current[acss_property] = value
elif acss_property == ACSS.AVERAGE_PITCH:
method(5.0)
current[acss_property] = 5.0
elif acss_property == ACSS.FAMILY \
and acss == settings.voices[settings.DEFAULT_VOICE]:
# We need to explicitly reset (at least) the family.
# See bgo#626072.
#
method({})
current[acss_property] = {}
def __addVerbalizedPunctuation(self, oldText):
"""Depending upon the users verbalized punctuation setting,
adjust punctuation symbols in the given text to their pronounced
equivalents. The pronounced text will either replace the
punctuation symbol or be inserted before it. In the latter case,
        this is to retain spoken prosody.
Arguments:
- oldText: text to be parsed for punctuation.
Returns a text string with the punctuation symbols adjusted accordingly.
"""
spokenEllipsis = messages.SPOKEN_ELLIPSIS + " "
newText = re.sub(ELLIPSIS, spokenEllipsis, oldText)
symbols = set(re.findall(PUNCTUATION, newText))
for symbol in symbols:
try:
level, action = punctuation_settings.getPunctuationInfo(symbol)
except:
continue
if level != punctuation_settings.LEVEL_NONE:
# Speech Dispatcher should handle it.
#
continue
charName = " %s " % chnames.getCharacterName(symbol)
if action == punctuation_settings.PUNCTUATION_INSERT:
charName += symbol
newText = re.sub(symbol, charName, newText)
if orca_state.activeScript:
newText = orca_state.activeScript.utilities.adjustForDigits(newText)
return newText
def _speak(self, text, acss, **kwargs):
if isinstance(text, ACSS):
text = ''
text = self.__addVerbalizedPunctuation(text)
if orca_state.activeScript:
text = orca_state.activeScript.\
utilities.adjustForPronunciation(text)
# Replace no break space characters with plain spaces since some
# synthesizers cannot handle them. See bug #591734.
#
text = text.replace('\u00a0', ' ')
# Replace newline followed by full stop, since
# this seems to crash sd, see bgo#618334.
#
text = text.replace('\n.', '\n')
self._apply_acss(acss)
self._send_command(self._client.speak, text, **kwargs)
def _say_all(self, iterator, orca_callback):
"""Process another sayAll chunk.
Called by the gidle thread.
"""
try:
context, acss = next(iterator)
except StopIteration:
pass
else:
def callback(callbackType, index_mark=None):
# This callback is called in Speech Dispatcher listener thread.
# No subsequent Speech Dispatcher interaction is allowed here,
# so we pass the calls to the gidle thread.
t = self._CALLBACK_TYPE_MAP[callbackType]
if t == speechserver.SayAllContext.PROGRESS:
if index_mark:
context.currentOffset = int(index_mark)
else:
context.currentOffset = context.startOffset
elif t == speechserver.SayAllContext.COMPLETED:
context.currentOffset = context.endOffset
GLib.idle_add(orca_callback, context, t)
if t == speechserver.SayAllContext.COMPLETED:
GLib.idle_add(self._say_all, iterator, orca_callback)
self._speak(context.utterance, acss, callback=callback,
event_types=list(self._CALLBACK_TYPE_MAP.keys()))
return False # to indicate, that we don't want to be called again.
def _cancel(self):
self._send_command(self._client.cancel)
def _change_default_speech_rate(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
rate = acss[ACSS.RATE]
except KeyError:
rate = 50
acss[ACSS.RATE] = max(0, min(99, rate + delta))
debug.println(debug.LEVEL_CONFIGURATION,
"Speech rate is now %d" % rate)
self.speak(decrease and messages.SPEECH_SLOWER \
or messages.SPEECH_FASTER, acss=acss)
def _change_default_speech_pitch(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
pitch = acss[ACSS.AVERAGE_PITCH]
except KeyError:
pitch = 5
acss[ACSS.AVERAGE_PITCH] = max(0, min(9, pitch + delta))
debug.println(debug.LEVEL_CONFIGURATION,
"Speech pitch is now %d" % pitch)
self.speak(decrease and messages.SPEECH_LOWER \
or messages.SPEECH_HIGHER, acss=acss)
def _change_default_speech_volume(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
volume = acss[ACSS.GAIN]
except KeyError:
volume = 5
acss[ACSS.GAIN] = max(0, min(9, volume + delta))
debug.println(debug.LEVEL_CONFIGURATION,
"Speech volume is now %d" % volume)
self.speak(decrease and messages.SPEECH_SOFTER \
or messages.SPEECH_LOUDER, acss=acss)
def getInfo(self):
return [self._SERVER_NAMES.get(self._id, self._id), self._id]
def getVoiceFamilies(self):
# Always offer the configured default voice with a language
# set according to the current locale.
from locale import getlocale, LC_MESSAGES
locale = getlocale(LC_MESSAGES)[0]
if locale is None or locale == 'C':
lang = None
dialect = None
else:
lang, dialect = locale.split('_')
voices = ((self._default_voice_name, lang, None),)
try:
# This command is not available with older SD versions.
list_synthesis_voices = self._client.list_synthesis_voices
except AttributeError:
pass
else:
try:
voices += self._send_command(list_synthesis_voices)
except:
pass
families = [speechserver.VoiceFamily({ \
speechserver.VoiceFamily.NAME: name,
#speechserver.VoiceFamily.GENDER: speechserver.VoiceFamily.MALE,
speechserver.VoiceFamily.DIALECT: dialect,
speechserver.VoiceFamily.LOCALE: lang})
for name, lang, dialect in voices]
return families
def speak(self, text=None, acss=None, interrupt=True):
#if interrupt:
# self._cancel()
# "We will not interrupt a key echo in progress." (Said the comment in
# speech.py where these next two lines used to live. But the code here
# suggests we haven't been doing anything with the lastKeyEchoTime in
# years. TODO - JD: Dig into this and if it's truly useless, kill it.)
if self._lastKeyEchoTime:
interrupt = interrupt and (time.time() - self._lastKeyEchoTime) > 0.5
if text:
self._speak(text, acss)
def speakUtterances(self, utteranceList, acss=None, interrupt=True):
#if interrupt:
# self._cancel()
for utterance in utteranceList:
if utterance:
self._speak(utterance, acss)
def sayAll(self, utteranceIterator, progressCallback):
GLib.idle_add(self._say_all, utteranceIterator, progressCallback)
def speakCharacter(self, character, acss=None):
self._apply_acss(acss)
if character == '\n':
self._send_command(self._client.sound_icon, 'end-of-line')
return
name = chnames.getCharacterName(character)
if not name:
self._send_command(self._client.char, character)
return
if orca_state.activeScript:
name = orca_state.activeScript.\
utilities.adjustForPronunciation(name)
self.speak(name, acss)
def speakKeyEvent(self, event):
if event.isPrintableKey() and event.event_string.isupper():
acss = settings.voices[settings.UPPERCASE_VOICE]
else:
acss = ACSS(settings.voices[settings.DEFAULT_VOICE])
event_string = event.getKeyName()
if orca_state.activeScript:
event_string = orca_state.activeScript.\
utilities.adjustForPronunciation(event_string)
lockingStateString = event.getLockingStateString()
event_string = "%s %s" % (event_string, lockingStateString)
self.speak(event_string, acss=acss)
self._lastKeyEchoTime = time.time()
def increaseSpeechRate(self, step=5):
self._change_default_speech_rate(step)
def decreaseSpeechRate(self, step=5):
self._change_default_speech_rate(step, decrease=True)
def increaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step)
def decreaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step, decrease=True)
def increaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step)
def decreaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step, decrease=True)
def stop(self):
self._cancel()
def shutdown(self):
self._client.close()
del SpeechServer._active_servers[self._id]
def reset(self, text=None, acss=None):
self._client.close()
self._init()
def list_output_modules(self):
"""Return names of available output modules as a tuple of strings.
This method is not a part of Orca speech API, but is used internally
by the Speech Dispatcher backend.
The returned tuple can be empty if the information can not be
obtained (e.g. with an older Speech Dispatcher version).
"""
try:
return self._send_command(self._client.list_output_modules)
except AttributeError:
return ()
except speechd.SSIPCommandError:
return ()
|
pvagner/orca
|
src/orca/speechdispatcherfactory.py
|
Python
|
lgpl-2.1
| 19,547
|
[
"ORCA"
] |
74f7a59cb608d4109ac7d85c0c38338af84cff9c8a705d447217ee13f1ec4fc5
|
import logging
import os
import re
import shutil
from functools import partial, wraps
import netCDF4
import numpy as np
from django.core.exceptions import ValidationError
from django.conf import settings
from django.db import models, transaction
from django.forms.models import formset_factory, BaseFormSet
from django.template import Template, Context
from dominate.tags import div, legend, form, button, p, em, a, textarea, _input
import hs_file_types.nc_functions.nc_dump as nc_dump
import hs_file_types.nc_functions.nc_meta as nc_meta
import hs_file_types.nc_functions.nc_utils as nc_utils
from .base import AbstractFileMetaData, AbstractLogicalFile, FileTypeContext
from hs_app_netCDF.forms import VariableForm, VariableValidationForm, OriginalCoverageForm
from hs_app_netCDF.models import NetCDFMetaDataMixin, OriginalCoverage, Variable
from hs_core.forms import CoverageTemporalForm, CoverageSpatialForm
from hs_core.hydroshare import utils
from hs_core.models import Creator, Contributor
from hs_core.signals import post_add_netcdf_aggregation
from hs_core.enums import RelationTypes
class NetCDFFileMetaData(NetCDFMetaDataMixin, AbstractFileMetaData):
# the metadata element models are from the netcdf resource type app
model_app_label = 'hs_app_netCDF'
def get_metadata_elements(self):
elements = super(NetCDFFileMetaData, self).get_metadata_elements()
elements += [self.original_coverage]
elements += list(self.variables.all())
return elements
@classmethod
def get_metadata_model_classes(cls):
metadata_model_classes = super(NetCDFFileMetaData, cls).get_metadata_model_classes()
metadata_model_classes['originalcoverage'] = OriginalCoverage
metadata_model_classes['variable'] = Variable
return metadata_model_classes
@property
def original_coverage(self):
# There can be at most only one instance of type OriginalCoverage associated
# with this metadata object
return self.ori_coverage.all().first()
def _get_opendap_html(self):
opendap_div = div(cls="content-block")
res_id = self.logical_file.resource.short_id
file_name = self.logical_file.aggregation_name
opendap_url = f'{settings.THREDDS_SERVER_URL}dodsC/hydroshare/resources/{res_id}/data/contents/{file_name}.html'
with opendap_div:
legend('OPeNDAP using DAP2')
em('The netCDF data in this multidimensional content aggregation may be accessed at the link below '
'using the OPeNDAP DAP2 protocol enabled on the HydroShare deployment of Unidata’s THREDDS data server. '
'This enables direct and programmable access to this data through ')
a(" OPeNDAP client software",
href="https://www.opendap.org/support/OPeNDAP-clients",
target="_blank")
with div(style="margin-top:10px;"):
a(opendap_url, href=opendap_url, target='_blank')
return opendap_div.render()
def get_html(self, **kwargs):
"""overrides the base class function"""
html_string = super(NetCDFFileMetaData, self).get_html()
if self.logical_file.resource.raccess.public:
html_string += self._get_opendap_html()
if self.spatial_coverage:
html_string += self.spatial_coverage.get_html()
if self.originalCoverage:
html_string += self.originalCoverage.get_html()
if self.temporal_coverage:
html_string += self.temporal_coverage.get_html()
variable_legend = legend("Variables")
html_string += variable_legend.render()
for variable in self.variables.all():
html_string += variable.get_html()
# ncdump text from the txt file
html_string += self.get_ncdump_html().render()
template = Template(html_string)
context = Context({})
return template.render(context)
def get_html_forms(self, dataset_name_form=True, temporal_coverage=True, **kwargs):
"""overrides the base class function"""
root_div = div("{% load crispy_forms_tags %}")
with root_div:
self.get_update_netcdf_file_html_form()
super(NetCDFFileMetaData, self).get_html_forms()
with div():
with div(cls="content-block", id="original-coverage-filetype"):
with form(id="id-origcoverage-file-type",
action="{{ orig_coverage_form.action }}",
method="post", enctype="multipart/form-data"):
div("{% crispy orig_coverage_form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
with div(cls="content-block", id="spatial-coverage-filetype"):
with form(id="id-spatial-coverage-file-type",
cls='hs-coordinates-picker', data_coordinates_type="box",
action="{{ spatial_coverage_form.action }}",
method="post", enctype="multipart/form-data"):
div("{% crispy spatial_coverage_form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
with div():
legend("Variables")
# id has to be variables to get the vertical scrollbar
with div(id="variables"):
with div("{% for form in variable_formset_forms %}"):
with form(id="{{ form.form_id }}", action="{{ form.action }}",
method="post", enctype="multipart/form-data",
cls="well"):
div("{% crispy form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
div("{% endfor %}")
self.get_ncdump_html()
template = Template(root_div.render())
temp_cov_form = self.get_temporal_coverage_form()
update_action = "/hsapi/_internal/NetCDFLogicalFile/{0}/{1}/{2}/update-file-metadata/"
create_action = "/hsapi/_internal/NetCDFLogicalFile/{0}/{1}/add-file-metadata/"
if self.temporal_coverage:
temp_action = update_action.format(self.logical_file.id, "coverage",
self.temporal_coverage.id)
else:
temp_action = create_action.format(self.logical_file.id, "coverage")
temp_cov_form.action = temp_action
orig_cov_form = self.get_original_coverage_form()
if self.originalCoverage:
temp_action = update_action.format(self.logical_file.id, "originalcoverage",
self.originalCoverage.id)
else:
temp_action = create_action.format(self.logical_file.id, "originalcoverage")
orig_cov_form.action = temp_action
spatial_cov_form = self.get_spatial_coverage_form(allow_edit=True)
if self.spatial_coverage:
temp_action = update_action.format(self.logical_file.id, "coverage",
self.spatial_coverage.id)
else:
temp_action = create_action.format(self.logical_file.id, "coverage")
spatial_cov_form.action = temp_action
context_dict = dict()
context_dict["temp_form"] = temp_cov_form
context_dict["orig_coverage_form"] = orig_cov_form
context_dict["spatial_coverage_form"] = spatial_cov_form
context_dict["variable_formset_forms"] = self.get_variable_formset().forms
context = Context(context_dict)
rendered_html = template.render(context)
return rendered_html
def get_update_netcdf_file_html_form(self):
form_action = "/hsapi/_internal/{}/update-netcdf-file/".format(self.logical_file.id)
style = "display:none;"
self.refresh_from_db()
if self.is_dirty:
style = "margin-bottom:15px"
root_div = div(id="div-netcdf-file-update", cls="row", style=style)
with root_div:
with div(cls="col-sm-12"):
with div(cls="alert alert-warning alert-dismissible", role="alert"):
div("NetCDF file needs to be synced with metadata changes.", cls='space-bottom')
_input(id="metadata-dirty", type="hidden", value=self.is_dirty)
with form(action=form_action, method="post", id="update-netcdf-file"):
button("Update NetCDF File", type="button", cls="btn btn-primary",
id="id-update-netcdf-file")
return root_div
def get_original_coverage_form(self):
return OriginalCoverage.get_html_form(resource=None, element=self.originalCoverage,
file_type=True)
def get_variable_formset(self):
VariableFormSetEdit = formset_factory(
wraps(VariableForm)(partial(VariableForm, allow_edit=True)),
formset=BaseFormSet, extra=0)
variable_formset = VariableFormSetEdit(
initial=list(self.variables.all().values()), prefix='Variable')
for frm in variable_formset.forms:
if len(frm.initial) > 0:
frm.action = "/hsapi/_internal/%s/%s/variable/%s/update-file-metadata/" % (
"NetCDFLogicalFile", self.logical_file.id, frm.initial['id'])
frm.number = frm.initial['id']
return variable_formset
def get_ncdump_html(self):
"""
Generates html code to display the contents of the ncdump text file. The generated html
is used for netcdf file type metadata view and edit modes.
:return:
"""
nc_dump_div = div()
nc_dump_res_file = None
for f in self.logical_file.files.all():
if f.extension == ".txt":
nc_dump_res_file = f
break
if nc_dump_res_file is not None:
nc_dump_div = div(style="clear: both", cls="content-block")
with nc_dump_div:
legend("NetCDF Header Information")
p(nc_dump_res_file.full_path[33:])
header_info = nc_dump_res_file.resource_file.read()
header_info = header_info.decode('utf-8')
textarea(header_info, readonly="", rows="15",
cls="input-xlarge", style="min-width: 100%; resize: vertical;")
return nc_dump_div
@classmethod
def validate_element_data(cls, request, element_name):
"""overriding the base class method"""
if element_name.lower() not in [el_name.lower() for el_name
in cls.get_supported_element_names()]:
err_msg = "{} is nor a supported metadata element for NetCDF file type"
err_msg = err_msg.format(element_name)
return {'is_valid': False, 'element_data_dict': None, "errors": err_msg}
element_name = element_name.lower()
if element_name == 'variable':
form_data = {}
for field_name in VariableValidationForm().fields:
try:
# when the request comes from the UI, the variable attributes have a prefix of
# '-'
matching_key = [key for key in request.POST if '-' + field_name in key][0]
except IndexError:
if field_name in request.POST:
matching_key = field_name
else:
continue
form_data[field_name] = request.POST[matching_key]
element_form = VariableValidationForm(form_data)
elif element_name == 'originalcoverage':
element_form = OriginalCoverageForm(data=request.POST)
elif element_name == 'coverage' and 'start' not in request.POST:
element_form = CoverageSpatialForm(data=request.POST)
else:
# here we are assuming temporal coverage
element_form = CoverageTemporalForm(data=request.POST)
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
class NetCDFLogicalFile(AbstractLogicalFile):
metadata = models.OneToOneField(NetCDFFileMetaData, related_name="logical_file")
data_type = "Multidimensional"
@classmethod
def get_allowed_uploaded_file_types(cls):
"""only .nc file can be set to this logical file group"""
return [".nc"]
@classmethod
def get_main_file_type(cls):
"""The main file type for this aggregation"""
return ".nc"
@classmethod
def get_allowed_storage_file_types(cls):
"""file types allowed in this logical file group are: .nc and .txt"""
return [".nc", ".txt"]
@staticmethod
def get_aggregation_display_name():
return 'Multidimensional Content: A multidimensional dataset represented by a NetCDF ' \
'file (.nc) and text file giving its NetCDF header content'
@staticmethod
def get_aggregation_term_label():
return "Multidimensional Aggregation"
@staticmethod
def get_aggregation_type_name():
return "MultidimensionalAggregation"
# used in discovery faceting to aggregate native and composite content types
@staticmethod
def get_discovery_content_type():
"""Return a human-readable content type for discovery.
This must agree between Composite Types and native types.
"""
return "Multidimensional (NetCDF)"
@classmethod
def create(cls, resource):
"""this custom method MUST be used to create an instance of this class"""
netcdf_metadata = NetCDFFileMetaData.objects.create(keywords=[], extra_metadata={})
# Note we are not creating the logical file record in DB at this point
# the caller must save this to DB
return cls(metadata=netcdf_metadata, resource=resource)
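    # Illustration only: typical (hypothetical) usage of the factory above.
    #     logical_file = NetCDFLogicalFile.create(resource)
    #     logical_file.save()  # per the note above, the caller persists it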
@property
def supports_resource_file_move(self):
"""resource files that are part of this logical file can't be moved"""
return False
@property
def supports_resource_file_add(self):
"""doesn't allow a resource file to be added"""
return False
@property
def supports_resource_file_rename(self):
"""resource files that are part of this logical file can't be renamed"""
return False
@property
def supports_delete_folder_on_zip(self):
"""does not allow the original folder to be deleted upon zipping of that folder"""
return False
def update_netcdf_file(self, user):
"""
writes metadata to the netcdf file associated with this instance of the logical file
:return:
"""
log = logging.getLogger()
nc_res_file = ''
txt_res_file = ''
for f in self.files.all():
if f.extension == '.nc':
nc_res_file = f
break
for f in self.files.all():
if f.extension == '.txt':
txt_res_file = f
break
if not nc_res_file:
msg = "No netcdf file exists for this logical file."
log.exception(msg)
raise ValidationError(msg)
netcdf_file_update(self, nc_res_file, txt_res_file, user)
@classmethod
def check_files_for_aggregation_type(cls, files):
"""Checks if the specified files can be used to set this aggregation type
:param files: a list of ResourceFile objects
:return If the files meet the requirements of this aggregation type, then returns this
aggregation class name, otherwise empty string.
"""
if len(files) != 1:
# no files or more than 1 file
return ""
if files[0].extension not in cls.get_allowed_uploaded_file_types():
return ""
return cls.__name__
@classmethod
def set_file_type(cls, resource, user, file_id=None, folder_path=''):
""" Creates a NetCDFLogicalFile (aggregation) from a netcdf file (.nc) resource file
"""
log = logging.getLogger()
with FileTypeContext(aggr_cls=cls, user=user, resource=resource, file_id=file_id,
folder_path=folder_path,
post_aggr_signal=post_add_netcdf_aggregation,
is_temp_file=True) as ft_ctx:
# base file name (no path included)
res_file = ft_ctx.res_file
file_name = res_file.file_name
# file name without the extension - needed for naming the new aggregation folder
nc_file_name = file_name[:-len(res_file.extension)]
resource_metadata = []
file_type_metadata = []
# file validation and metadata extraction
temp_file = ft_ctx.temp_file
nc_dataset = nc_utils.get_nc_dataset(temp_file)
if isinstance(nc_dataset, netCDF4.Dataset):
msg = "NetCDF aggregation. Error when creating aggregation. Error:{}"
# extract the metadata from netcdf file
res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(temp_file)
# populate resource_metadata and file_type_metadata lists with extracted metadata
add_metadata_to_list(resource_metadata, res_dublin_core_meta,
res_type_specific_meta, file_type_metadata, resource)
# create the ncdump text file
dump_file_name = nc_file_name + "_header_info.txt"
for file in resource.files.filter(file_folder=folder_path):
# look for and delete an existing header_file before creating it below.
fname = os.path.basename(file.resource_file.name)
if fname in dump_file_name:
file.delete()
break
dump_file = create_header_info_txt_file(temp_file, nc_file_name)
file_folder = res_file.file_folder
upload_folder = file_folder
dataset_title = res_dublin_core_meta.get('title', nc_file_name)
with transaction.atomic():
try:
# create a netcdf logical file object
logical_file = cls.create_aggregation(dataset_name=dataset_title,
resource=resource,
res_files=[res_file],
new_files_to_upload=[dump_file],
folder_path=upload_folder)
log.info("NetCDF aggregation creation - a new file was added to the "
"resource.")
# use the extracted metadata to populate resource metadata
for element in resource_metadata:
# here k is the name of the element
# v is a dict of all element attributes/field names and field values
k, v = list(element.items())[0]
if k == 'title':
# update title element
title_element = resource.metadata.title
resource.metadata.update_element('title', title_element.id, **v)
else:
resource.metadata.create_element(k, **v)
log.info("NetCDF Aggregation creation - Resource metadata was saved to DB")
# use the extracted metadata to populate file metadata
for element in file_type_metadata:
# here k is the name of the element
# v is a dict of all element attributes/field names and field values
k, v = list(element.items())[0]
if k == 'subject':
logical_file.metadata.keywords = v
logical_file.metadata.save()
# update resource level keywords
resource_keywords = [subject.value.lower() for subject in
resource.metadata.subjects.all()]
for kw in logical_file.metadata.keywords:
if kw.lower() not in resource_keywords:
resource.metadata.create_element('subject', value=kw)
else:
logical_file.metadata.create_element(k, **v)
log.info("NetCDF aggregation - metadata was saved in aggregation")
ft_ctx.logical_file = logical_file
except Exception as ex:
logical_file.remove_aggregation()
msg = msg.format(str(ex))
log.exception(msg)
raise ValidationError(msg)
return logical_file
else:
err_msg = "Not a valid NetCDF file. NetCDF aggregation validation failed."
log.error(err_msg)
raise ValidationError(err_msg)
def remove_aggregation(self):
"""Deletes the aggregation object (logical file) *self* and the associated metadata
object. If the aggregation contains a system generated txt file that resource file also will be
deleted."""
# need to delete the system generated ncdump txt file
txt_file = None
for res_file in self.files.all():
if res_file.file_name.lower().endswith(".txt"):
txt_file = res_file
break
super(NetCDFLogicalFile, self).remove_aggregation()
if txt_file is not None:
txt_file.delete()
@classmethod
def get_primary_resouce_file(cls, resource_files):
"""Gets a resource file that has extension .nc from the list of files *resource_files* """
res_files = [f for f in resource_files if f.extension.lower() == '.nc']
return res_files[0] if res_files else None
def add_metadata_to_list(res_meta_list, extracted_core_meta, extracted_specific_meta,
file_meta_list=None, resource=None):
"""
Helper function to populate metadata lists (*res_meta_list* and *file_meta_list*) with
extracted metadata from the NetCDF file. These metadata lists are then used for creating
metadata element objects by the caller.
:param res_meta_list: a list to store data to create metadata elements at the resource level
:param extracted_core_meta: a dict of extracted dublin core metadata
:param extracted_specific_meta: a dict of extracted metadata that is NetCDF specific
:param file_meta_list: a list to store data to create metadata elements at the file type level
     (must be None when this helper function is used for NetCDF resource and must not be None
     when used for NetCDF file type)
    :param resource: an instance of BaseResource (must be None when this helper function is used
     for NetCDF resource and must not be None when used for NetCDF file type)
:return:
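    Example (illustrative only; the element names and value dicts below follow the add_*_metadata
    helpers defined later in this module, and the sample values are hypothetical):
        res_meta_list = []
        file_meta_list = []
        add_metadata_to_list(res_meta_list, extracted_core_meta, extracted_specific_meta,
                             file_meta_list, resource)
        # res_meta_list may then contain entries such as
        #   {'title': {'value': 'Snow data'}}
        #   {'creator': {'name': 'Jane Doe', 'email': '', 'homepage': ''}}
        # which the caller unpacks as k, v = list(element.items())[0] and feeds to
        # resource.metadata.create_element(k, **v)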
"""
# add title
if resource is not None and file_meta_list is not None:
# file type
if resource.metadata.title.value.lower() == 'untitled resource':
add_title_metadata(res_meta_list, extracted_core_meta)
else:
# resource type
add_title_metadata(res_meta_list, extracted_core_meta)
# add abstract (Description element)
if resource is not None and file_meta_list is not None:
# file type
if resource.metadata.description is None:
add_abstract_metadata(res_meta_list, extracted_core_meta)
else:
# resource type
add_abstract_metadata(res_meta_list, extracted_core_meta)
# add keywords
if file_meta_list is not None:
# file type
add_keywords_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_keywords_metadata(res_meta_list, extracted_core_meta, file_type=False)
# add creators:
if resource is not None:
# file type
add_creators_metadata(res_meta_list, extracted_core_meta,
resource.metadata.creators.all())
else:
# resource type
add_creators_metadata(res_meta_list, extracted_core_meta,
Creator.objects.none())
# add contributors:
if resource is not None:
# file type
add_contributors_metadata(res_meta_list, extracted_core_meta,
resource.metadata.contributors.all())
else:
# resource type
add_contributors_metadata(res_meta_list, extracted_core_meta,
Contributor.objects.none())
# add relation of type 'source' (applies only to NetCDF resource type)
if extracted_core_meta.get('source') and file_meta_list is None:
relation = {'relation': {'type': 'source', 'value': extracted_core_meta['source']}}
res_meta_list.append(relation)
# add relation of type 'references' (applies only to NetCDF resource type)
if extracted_core_meta.get('references') and file_meta_list is None:
relation = {'relation': {'type': 'references',
'value': extracted_core_meta['references']}}
res_meta_list.append(relation)
# add rights (applies only to NetCDF resource type)
if extracted_core_meta.get('rights') and file_meta_list is None:
raw_info = extracted_core_meta.get('rights')
b = re.search("(?P<url>https?://[^\s]+)", raw_info)
url = b.group('url') if b else ''
statement = raw_info.replace(url, '') if url else raw_info
rights = {'rights': {'statement': statement, 'url': url}}
res_meta_list.append(rights)
# add coverage - period
if file_meta_list is not None:
# file type
add_temporal_coverage_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_temporal_coverage_metadata(res_meta_list, extracted_core_meta)
# add coverage - box
if file_meta_list is not None:
# file type
add_spatial_coverage_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_spatial_coverage_metadata(res_meta_list, extracted_core_meta)
# add variables
if file_meta_list is not None:
# file type
add_variable_metadata(file_meta_list, extracted_specific_meta)
else:
# resource type
add_variable_metadata(res_meta_list, extracted_specific_meta)
# add original spatial coverage
if file_meta_list is not None:
# file type
add_original_coverage_metadata(file_meta_list, extracted_core_meta)
else:
# resource type
add_original_coverage_metadata(res_meta_list, extracted_core_meta)
def add_original_coverage_metadata(metadata_list, extracted_metadata):
"""
Adds data for the original coverage element to the *metadata_list*
:param metadata_list: list to which original coverage data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
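    Example (illustrative; the exact contents of 'original-box' and 'projection-info' depend on
    what nc_meta extracts from the file):
        meta_list = []
        extracted = {'original-box': {'northlimit': 12.0, 'southlimit': 10.0, 'units': 'm'},
                     'projection-info': {'type': 'WKT String', 'text': '...', 'datum': 'WGS84'}}
        add_original_coverage_metadata(meta_list, extracted)
        # meta_list now holds a single dict keyed by 'originalcoverage' whose value carries the
        # box data plus projection_string_type, projection_string_text and datum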
"""
ori_cov = {}
if extracted_metadata.get('original-box'):
coverage_data = extracted_metadata['original-box']
projection_string_type = ""
projection_string_text = ""
datum = ""
if extracted_metadata.get('projection-info'):
projection_string_type = extracted_metadata[
'projection-info']['type']
projection_string_text = extracted_metadata[
'projection-info']['text']
datum = extracted_metadata['projection-info']['datum']
ori_cov = {'originalcoverage':
{'value': coverage_data,
'projection_string_type': projection_string_type,
'projection_string_text': projection_string_text,
'datum': datum
}
}
if ori_cov:
metadata_list.append(ori_cov)
def add_creators_metadata(metadata_list, extracted_metadata, existing_creators):
"""
Adds data for creator(s) to the *metadata_list*
:param metadata_list: list to which creator(s) data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:param existing_creators: a QuerySet object for existing creators
:return:
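    Example (illustrative values; existing_creators is normally a QuerySet such as
    resource.metadata.creators.all(), and an empty QuerySet like Creator.objects.none() also works):
        meta_list = []
        extracted = {'creator_name': 'Jane Doe', 'creator_email': 'jane@example.org'}
        add_creators_metadata(meta_list, extracted, Creator.objects.none())
        # meta_list -> [{'creator': {'name': 'Jane Doe', 'email': 'jane@example.org',
        #                            'homepage': ''}}]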
"""
if extracted_metadata.get('creator_name'):
name = extracted_metadata['creator_name']
# add creator only if there is no creator already with the same name
if not existing_creators.filter(name=name).exists():
email = extracted_metadata.get('creator_email', '')
url = extracted_metadata.get('creator_url', '')
creator = {'creator': {'name': name, 'email': email, 'homepage': url}}
metadata_list.append(creator)
def add_contributors_metadata(metadata_list, extracted_metadata, existing_contributors):
"""
Adds data for contributor(s) to the *metadata_list*
:param metadata_list: list to which contributor(s) data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:param existing_contributors: a QuerySet object for existing contributors
:return:
"""
if extracted_metadata.get('contributor_name'):
name_list = extracted_metadata['contributor_name'].split(',')
for name in name_list:
# add contributor only if there is no contributor already with the
# same name
if not existing_contributors.filter(name=name).exists():
contributor = {'contributor': {'name': name}}
metadata_list.append(contributor)
def add_title_metadata(metadata_list, extracted_metadata):
"""
Adds data for the title element to the *metadata_list*
:param metadata_list: list to which title data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('title'):
res_title = {'title': {'value': extracted_metadata['title']}}
metadata_list.append(res_title)
def add_abstract_metadata(metadata_list, extracted_metadata):
"""
Adds data for the abstract (Description) element to the *metadata_list*
:param metadata_list: list to which abstract data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('description'):
description = {'description': {'abstract': extracted_metadata['description']}}
metadata_list.append(description)
def add_variable_metadata(metadata_list, extracted_metadata):
"""
Adds variable(s) related data to the *metadata_list*
:param metadata_list: list to which variable data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
for var_name, var_meta in list(extracted_metadata.items()):
meta_info = {}
for element, value in list(var_meta.items()):
if value != '':
meta_info[element] = value
metadata_list.append({'variable': meta_info})
def add_spatial_coverage_metadata(metadata_list, extracted_metadata):
"""
Adds data for one spatial coverage metadata element to the *metadata_list**
:param metadata_list: list to which spatial coverage data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('box'):
box = {'coverage': {'type': 'box', 'value': extracted_metadata['box']}}
metadata_list.append(box)
def add_temporal_coverage_metadata(metadata_list, extracted_metadata):
"""
Adds data for one temporal metadata element to the *metadata_list*
:param metadata_list: list to which temporal coverage data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:return:
"""
if extracted_metadata.get('period'):
period = {
'coverage': {'type': 'period', 'value': extracted_metadata['period']}}
metadata_list.append(period)
def add_keywords_metadata(metadata_list, extracted_metadata, file_type=True):
"""
Adds data for subject/keywords element to the *metadata_list*
:param metadata_list: list to which keyword data needs to be added
:param extracted_metadata: a dict containing netcdf extracted metadata
:param file_type: If True then this metadata extraction is for netCDF file type, otherwise
metadata extraction is for NetCDF resource
:return:
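    Example (illustrative):
        meta_list = []
        add_keywords_metadata(meta_list, {'subject': 'climate,hydrology'})
        # file type: appends a single entry {'subject': ['climate', 'hydrology']}
        add_keywords_metadata(meta_list, {'subject': 'climate,hydrology'}, file_type=False)
        # resource type: appends {'subject': {'value': 'climate'}} and
        # {'subject': {'value': 'hydrology'}} as separate entries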
"""
if extracted_metadata.get('subject'):
keywords = extracted_metadata['subject'].split(',')
if file_type:
metadata_list.append({'subject': keywords})
else:
for keyword in keywords:
metadata_list.append({'subject': {'value': keyword}})
def create_header_info_txt_file(nc_temp_file, nc_file_name):
"""
Creates the header text file using the *nc_temp_file*
    :param nc_temp_file: the netcdf file copied from irods to django
     for metadata extraction
    :param nc_file_name: name of the netcdf file (without the extension) used to name the
     generated header text file
    :return:
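    Example (illustrative paths):
        dump_file = create_header_info_txt_file('/tmp/nc_work/logan.nc', 'logan')
        # dump_file -> '/tmp/nc_work/logan_header_info.txt' containing the ncdump header text,
        # with the first line rewritten to start with "netcdf logan "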
"""
    # prefer the ncdump-based header; fall back to the python-generated dump if it is empty
    dump_str = nc_dump.get_nc_dump_string_by_ncdump(nc_temp_file)
    if not dump_str:
        dump_str = nc_dump.get_nc_dump_string(nc_temp_file)
    # nc_file_name is the original file name without the extension; use it to name the dump file
temp_dir = os.path.dirname(nc_temp_file)
dump_file_name = nc_file_name + '_header_info.txt'
dump_file = os.path.join(temp_dir, dump_file_name)
    if dump_str:
        # refine the first line of dump_str so it shows the original file name
        first_line = list('netcdf {0} '.format(nc_file_name))
        first_line_index = dump_str.index('{')
        dump_str = "".join(first_line + list(dump_str)[first_line_index:])
    else:
        dump_str = ""
    # the with block closes the file; no explicit close() is needed
    with open(dump_file, 'w') as dump_file_obj:
        dump_file_obj.write(dump_str)
return dump_file
def netcdf_file_update(instance, nc_res_file, txt_res_file, user):
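    """
    Writes the metadata of *instance* back into the netCDF file itself, regenerates the ncdump
    header text file, and replaces both files on iRODS.
    :param instance: a NetCDFLogicalFile (file type) or a NetCDF resource object
    :param nc_res_file: resource file object for the .nc file to be updated
    :param txt_res_file: resource file object for the ncdump header text file to be updated
    :param user: the user on whose behalf the updated files are pushed to iRODS
    :return:
    """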
log = logging.getLogger()
# check the instance type
file_type = isinstance(instance, NetCDFLogicalFile)
# get the file from irods to temp dir
temp_nc_file = utils.get_file_from_irods(nc_res_file)
nc_dataset = netCDF4.Dataset(temp_nc_file, 'a')
try:
# update title
title = instance.dataset_name if file_type else instance.metadata.title.value
if title.lower() != 'untitled resource':
if hasattr(nc_dataset, 'title'):
delattr(nc_dataset, 'title')
nc_dataset.title = title
# update keywords
keywords = instance.metadata.keywords if file_type \
else [item.value for item in instance.metadata.subjects.all()]
if hasattr(nc_dataset, 'keywords'):
delattr(nc_dataset, 'keywords')
if keywords:
nc_dataset.keywords = ', '.join(keywords)
# update key/value metadata
extra_metadata_dict = instance.metadata.extra_metadata if file_type \
else instance.extra_metadata
if hasattr(nc_dataset, 'hs_extra_metadata'):
delattr(nc_dataset, 'hs_extra_metadata')
if extra_metadata_dict:
extra_metadata = []
for k, v in list(extra_metadata_dict.items()):
extra_metadata.append("{}:{}".format(k, v))
nc_dataset.hs_extra_metadata = ', '.join(extra_metadata)
# update temporal coverage
temporal_coverage = instance.metadata.temporal_coverage if file_type \
else instance.metadata.coverages.all().filter(type='period').first()
for attr_name in ['time_coverage_start', 'time_coverage_end']:
if hasattr(nc_dataset, attr_name):
delattr(nc_dataset, attr_name)
if temporal_coverage:
nc_dataset.time_coverage_start = temporal_coverage.value['start']
nc_dataset.time_coverage_end = temporal_coverage.value['end']
# update spatial coverage
spatial_coverage = instance.metadata.spatial_coverage if file_type \
else instance.metadata.coverages.all().filter(type='box').first()
for attr_name in ['geospatial_lat_min', 'geospatial_lat_max', 'geospatial_lon_min',
'geospatial_lon_max']:
if hasattr(nc_dataset, attr_name):
delattr(nc_dataset, attr_name)
if spatial_coverage:
nc_dataset.geospatial_lat_min = spatial_coverage.value['southlimit']
nc_dataset.geospatial_lat_max = spatial_coverage.value['northlimit']
nc_dataset.geospatial_lon_min = spatial_coverage.value['westlimit']
nc_dataset.geospatial_lon_max = spatial_coverage.value['eastlimit']
# update variables
if instance.metadata.variables.all():
dataset_variables = nc_dataset.variables
for variable in instance.metadata.variables.all():
if variable.name in list(dataset_variables.keys()):
dataset_variable = dataset_variables[variable.name]
# update units
if hasattr(dataset_variable, 'units'):
delattr(dataset_variable, 'units')
if variable.unit != 'Unknown':
dataset_variable.setncattr('units', variable.unit)
# update long_name
if hasattr(dataset_variable, 'long_name'):
delattr(dataset_variable, 'long_name')
if variable.descriptive_name:
dataset_variable.setncattr('long_name', variable.descriptive_name)
# update method
if hasattr(dataset_variable, 'comment'):
delattr(dataset_variable, 'comment')
if variable.method:
dataset_variable.setncattr('comment', variable.method)
# update missing value
if variable.missing_value:
if hasattr(dataset_variable, 'missing_value'):
missing_value = dataset_variable.missing_value
delattr(dataset_variable, 'missing_value')
else:
missing_value = ''
try:
dt = np.dtype(dataset_variable.datatype.name)
missing_value = np.fromstring(variable.missing_value + ' ',
dtype=dt.type, sep=" ")
                        except Exception:
                            pass
if missing_value:
dataset_variable.setncattr('missing_value', missing_value)
# Update metadata element that only apply to netCDF resource
if not file_type:
# update summary
if hasattr(nc_dataset, 'summary'):
delattr(nc_dataset, 'summary')
if instance.metadata.description:
nc_dataset.summary = instance.metadata.description.abstract
# update contributor
if hasattr(nc_dataset, 'contributor_name'):
delattr(nc_dataset, 'contributor_name')
contributor_list = instance.metadata.contributors.all()
if contributor_list:
res_contri_name = []
for contributor in contributor_list:
res_contri_name.append(contributor.name)
nc_dataset.contributor_name = ', '.join(res_contri_name)
# update creator
for attr_name in ['creator_name', 'creator_email', 'creator_url']:
if hasattr(nc_dataset, attr_name):
delattr(nc_dataset, attr_name)
creator = instance.metadata.creators.all().filter(order=1).first()
if creator:
nc_dataset.creator_name = creator.name if creator.name else creator.organization
if creator.email:
nc_dataset.creator_email = creator.email
if creator.description or creator.homepage:
nc_dataset.creator_url = creator.homepage if creator.homepage \
else 'https://www.hydroshare.org' + creator.description
# update license
if hasattr(nc_dataset, 'license'):
delattr(nc_dataset, 'license')
if instance.metadata.rights:
nc_dataset.license = "{0} {1}".format(instance.metadata.rights.statement,
instance.metadata.rights.url)
# update reference
if hasattr(nc_dataset, 'references'):
delattr(nc_dataset, 'references')
reference_list = instance.metadata.relations.all().filter(type=RelationTypes.references)
if reference_list:
res_meta_ref = []
for reference in reference_list:
res_meta_ref.append(reference.value)
nc_dataset.references = ' \n'.join(res_meta_ref)
# update source
if hasattr(nc_dataset, 'source'):
delattr(nc_dataset, 'source')
source_list = instance.metadata.relations.filter(type=RelationTypes.source).all()
if source_list:
res_meta_source = []
for source in source_list:
res_meta_source.append(source.value)
nc_dataset.source = ' \n'.join(res_meta_source)
# close nc dataset
nc_dataset.close()
except Exception as ex:
log.exception(str(ex))
if os.path.exists(temp_nc_file):
shutil.rmtree(os.path.dirname(temp_nc_file))
raise ex
# create the ncdump text file
nc_file_name = os.path.basename(temp_nc_file).split(".")[0]
temp_text_file = create_header_info_txt_file(temp_nc_file, nc_file_name)
# push the updated nc file and the txt file to iRODS
utils.replace_resource_file_on_irods(temp_nc_file, nc_res_file,
user)
utils.replace_resource_file_on_irods(temp_text_file, txt_res_file,
user)
metadata = instance.metadata
if file_type:
instance.create_aggregation_xml_documents(create_map_xml=False)
metadata.is_dirty = False
metadata.save()
# cleanup the temp dir
if os.path.exists(temp_nc_file):
shutil.rmtree(os.path.dirname(temp_nc_file))
|
hydroshare/hydroshare
|
hs_file_types/models/netcdf.py
|
Python
|
bsd-3-clause
| 44,595
|
[
"NetCDF"
] |
6bb452f85bebf0b2225e0d316a03436cb9f804c78f57d1902166aa84fbf858a0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import os
import unittest
from pymatgen.io.lammps.input import LammpsInput
__author__ = 'Kiran Mathew'
__email__ = '[email protected]'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class TestLammpsInput(unittest.TestCase):
def setUp(self):
self.template_file = os.path.join(test_dir, "in.peptide.template")
self.settings = {
"pair_style": "lj/charmm/coul/long 8.0 10.0 10.0",
"kspace_style": "pppm 0.0001",
"fix_1": "1 all nvt temp 275.0 275.0 100.0 tchain 1",
"fix_2": "2 all shake 0.0001 10 100 b 4 6 8 10 12 14 18 a 31"
}
self.lammps_input = LammpsInput.from_file(self.template_file, self.settings)
def test_as_dict(self):
d = self.lammps_input.as_dict()
d.pop("@class")
d.pop("@module")
d_test = {}
with open(os.path.join(test_dir, "in.peptide.template.with_read_data"), "r") as f:
d_test["contents"] = f.read() + "\nlog $${log_file}"
d_test["settings"] = self.settings
d_test["settings"]["data_file"] = "data.peptide"
d_test["delimiter"] = "$$"
self.assertDictEqual(d, d_test)
def test_read_data_placeholder(self):
self.assertIn("data_file", self.lammps_input.settings)
self.assertEqual(self.lammps_input.settings["data_file"], "data.peptide")
def test_log_placeholder(self):
self.assertIn("log_file", self.lammps_input.settings)
self.assertEqual(self.lammps_input.settings["log_file"], "log.lammps")
def test_string_representation(self):
input_file = os.path.join(test_dir, "in.peptide")
input_file_lines = str(self.lammps_input).split("\n")
with open(input_file) as f:
input_file_lines_ans = f.readlines() + ["", "log log.lammps"]
for l1, l2 in zip(input_file_lines, input_file_lines_ans):
self.assertEqual(l1.strip(), l2.strip())
if __name__ == "__main__":
unittest.main()
|
setten/pymatgen
|
pymatgen/io/lammps/tests/test_input.py
|
Python
|
mit
| 2,251
|
[
"CHARMM",
"LAMMPS",
"pymatgen"
] |
76fa6df1f42b5de6ba07d7f660e1be0a0201c4fdd6127119672ab188b84cc363
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 - 2020 by Pedro Mendes, Rector and Visitors of the
# University of Virginia, University of Heidelberg, and University
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
# Copyright (C) 2008 - 2009 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
# Copyright (C) 2006 - 2007 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc. and EML Research, gGmbH.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CDataContainer(unittest.TestCase):
def setUp(self):
self.datamodel=COPASI.CRootContainer.addDatamodel()
self.datamodel.loadModel("calcium_juergen.cps")
def test_ObjectFromName(self):
metab=self.datamodel.getModel().getMetabolite(1)
object=self.datamodel.getObjectFromCN(metab.getCN())
self.assert_(object!=None)
self.assert_(object.__class__==COPASI.CMetab)
self.assert_(metab.getCN().getString()==object.getCN().getString())
def suite():
tests=[
'test_ObjectFromName'
]
return unittest.TestSuite(map(Test_CDataContainer,tests))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
copasi/COPASI
|
copasi/bindings/python/unittests/Test_CCopasiContainer.py
|
Python
|
artistic-2.0
| 1,692
|
[
"COPASI"
] |
c819a8e7e086114d36a35f52ac5ff0668320644fd692257f39f6a5f9d4856e16
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011-2012 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# Android Application Script
#
import os, sys, shutil, platform, zipfile
import string, subprocess, re
from xml.etree.ElementTree import ElementTree
from StringIO import StringIO
from os.path import join, splitext, split, exists
from shutil import copyfile
from androidsdk import AndroidSDK
from compiler import Compiler
import bindings
this_dir = os.path.dirname(__file__)
module_dir = os.path.join(os.path.dirname(this_dir), 'module')
common_dir = os.path.join(os.path.dirname(this_dir), 'common')
scripts_root = os.path.dirname(this_dir)
tools_root = os.path.dirname(scripts_root)
baseapp_templates = os.path.join(tools_root, "templates", "baseapp")
sys.path.extend([os.path.dirname(this_dir), module_dir, common_dir, os.path.join(tools_root, "thirdparty")])
from mako.template import Template
from tiapp import TiAppXML, touch_tiapp_xml
from manifest import Manifest
from module import ModuleDetector
import simplejson
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store']
ignoreDirs = ['.git', '.svn', '_svn', 'CVS']
def run(args):
return subprocess.Popen(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0]
def pipe(args1,args2):
p1 = subprocess.Popen(args1, stdout=subprocess.PIPE)
p2 = subprocess.Popen(args2, stdin=p1.stdout, stdout=subprocess.PIPE)
return p2.communicate()[0]
def copy_resources(source, target):
if not os.path.exists(os.path.expanduser(target)):
os.mkdir(os.path.expanduser(target))
for root, dirs, files in os.walk(source):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
from_ = join(root, file)
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(split(to_)[0])
if not exists(to_directory):
os.makedirs(to_directory)
print "[TRACE] copying: %s to: %s" % (from_,to_)
copyfile(from_, to_)
class Android(object):
def __init__(self, name, myid, android_sdk_dir, deploy_type, java, ti_sdk_dir):
self.name = name
# android requires at least one dot in packageid
if len(re.findall(r'\.',myid))==0:
myid = 'com.%s' % myid
self.id = myid
self.android_sdk_dir = android_sdk_dir
self.ti_sdk_dir = ti_sdk_dir
bindings.init(os.path.join(ti_sdk_dir, "android"))
# Used in templating
self.config = {
'appid': self.id,
'appname' : self.name,
'appversion' : '1',
'apiversion' : '7', #Android 2.1
'deploy_type': deploy_type,
'compile_js': False
}
self.config['classname'] = Android.strip_classname(self.name)
self.deploy_type = deploy_type
self.java = java
@classmethod
def strip_classname(cls, name):
classname = ''.join([str.capitalize() for str in re.split('[^A-Za-z0-9_]', name)])
if re.search("^[0-9]", classname) != None:
classname = "_" + classname
return classname
def newdir(self, *segments):
path = os.path.join(*segments)
if not os.path.exists(path):
os.makedirs(path)
return path
def copyfile(self, file, src, dest):
shutil.copy(os.path.join(src, file), os.path.join(dest, file))
def load_template(self, template):
return Template(filename=template, output_encoding='utf-8', encoding_errors='replace')
def render_android_manifest(self):
this_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
tmpl = self.load_template(os.path.join(baseapp_templates, 'android', 'native', 'AndroidManifest.xml'))
return tmpl.render(config = self.config)
def render(self, this_dir, template_file, dest, dest_file, **kwargs):
tmpl = self.load_template(os.path.join(baseapp_templates, 'android', 'native', template_file))
f = None
try:
print "[TRACE] Generating %s" % os.path.join(dest, dest_file)
f = open(os.path.join(dest, dest_file), "w")
f.write(tmpl.render(config = self.config, **kwargs))
finally:
			if f is not None: f.close()
def build_app_info(self, project_dir):
tiapp = ElementTree()
assets_tiappxml = os.path.join(project_dir, 'build', 'android', 'bin', 'assets', 'tiapp.xml')
self.app_info = {'fullscreen':'false','navbar-hidden':'false'}
self.app_properties = {}
if not os.path.exists(assets_tiappxml):
shutil.copy(os.path.join(project_dir, 'tiapp.xml'), assets_tiappxml)
tiapp.parse(open(assets_tiappxml, 'r'))
for key in ['id', 'name', 'version', 'publisher', 'url', 'copyright',
'description', 'icon', 'analytics', 'guid', 'navbar-hidden', 'fullscreen']:
el = tiapp.find(key)
if el != None:
self.app_info[key] = el.text
for property_el in tiapp.findall("property"):
name = property_el.get("name")
type = property_el.get("type")
value = property_el.text
if name == None: continue
if type == None: type = "string"
if value == None: value = ""
self.app_properties[name] = {"type": type, "value": value}
def generate_activities(self, app_package_dir):
if not 'activities' in self.tiapp.android: return
for key in self.tiapp.android['activities'].keys():
activity = self.tiapp.android['activities'][key]
print '[DEBUG] generating activity class: ' + activity['classname']
self.render(this_dir, 'JSActivity.java', app_package_dir, activity['classname']+'.java', activity=activity)
def generate_services(self, app_package_dir):
if not 'services' in self.tiapp.android: return
for key in self.tiapp.android['services'].keys():
service = self.tiapp.android['services'][key]
service_type = service['service_type']
print '[DEBUG] generating service type "%s", class "%s"' %(service_type, service['classname'])
if service_type == 'interval':
self.render(this_dir, 'JSIntervalService.java', app_package_dir, service['classname']+'.java', service=service)
else:
self.render(this_dir, 'JSService.java', app_package_dir, service['classname']+'.java', service=service)
def build_modules_info(self, resources_dir, app_bin_dir, include_all_ti_modules=False):
self.app_modules = []
(modules, external_child_modules) = bindings.get_all_module_bindings()
compiler = Compiler(self.tiapp, resources_dir, self.java, app_bin_dir,
None, os.path.dirname(app_bin_dir),
include_all_modules=include_all_ti_modules,
ti_sdk_dir=self.ti_sdk_dir)
compiler.compile(compile_bytecode=False, info_message=None)
for module in compiler.modules:
module_bindings = []
# TODO: we should also detect module properties
for method in compiler.module_methods:
if method.lower().startswith(module+'.') and '.' not in method:
module_bindings.append(method[len(module)+1:])
module_onAppCreate = None
module_class = None
module_apiName = None
for m in modules.keys():
if modules[m]['fullAPIName'].lower() == module:
module_class = m
module_apiName = modules[m]['fullAPIName']
if 'onAppCreate' in modules[m]:
module_onAppCreate = modules[m]['onAppCreate']
break
if module_apiName == None: continue # module wasn't found
ext_modules = []
if module_class in external_child_modules:
for child_module in external_child_modules[module_class]:
if child_module['fullAPIName'].lower() in compiler.modules:
ext_modules.append(child_module)
self.app_modules.append({
'api_name': module_apiName,
'class_name': module_class,
'bindings': module_bindings,
'external_child_modules': ext_modules,
'on_app_create': module_onAppCreate
})
# discover app modules
detector = ModuleDetector(self.project_dir, self.ti_sdk_dir)
missing, detected_modules = detector.find_app_modules(self.tiapp, 'android')
for missing_module in missing: print '[WARN] Couldn\'t find app module: %s' % missing_module['id']
self.custom_modules = []
for module in detected_modules:
if module.jar == None: continue
module_jar = zipfile.ZipFile(module.jar)
module_bindings = bindings.get_module_bindings(module_jar)
if module_bindings is None: continue
for module_class in module_bindings['modules'].keys():
module_apiName = module_bindings['modules'][module_class]['apiName']
module_proxy = module_bindings['proxies'][module_class]
module_id = module_proxy['proxyAttrs']['id']
module_proxy_class_name = module_proxy['proxyClassName']
module_onAppCreate = None
if 'onAppCreate' in module_proxy:
module_onAppCreate = module_proxy['onAppCreate']
print '[DEBUG] module_id = %s' % module_id
if module_id == module.manifest.moduleid:
# make sure that the module was not built before 1.8.0.1
try:
module_api_version = int(module.manifest.apiversion)
if module_api_version < 2:
print "[ERROR] The 'apiversion' for '%s' in the module manifest is less than version 2. The module was likely built against a Titanium SDK pre 1.8.0.1. Please use a version of the module that has 'apiversion' 2 or greater" % module_id
touch_tiapp_xml(os.path.join(self.project_dir, 'tiapp.xml'))
sys.exit(1)
except(TypeError, ValueError):
print "[ERROR] The 'apiversion' for '%s' in the module manifest is not a valid value. Please use a version of the module that has an 'apiversion' value of 2 or greater set in it's manifest file" % module_id
touch_tiapp_xml(os.path.join(self.project_dir, 'tiapp.xml'))
sys.exit(1)
print '[DEBUG] appending module: %s' % module_class
self.custom_modules.append({
'module_id': module_id,
'module_apiName': module_apiName,
'proxy_name': module_proxy_class_name,
'class_name': module_class,
'manifest': module.manifest,
'on_app_create': module_onAppCreate
})
def create(self, dir, build_time=False, project_dir=None, include_all_ti_modules=False):
this_dir = os.path.dirname(sys._getframe(0).f_code.co_filename)
# Build up output directory tree
if project_dir is None:
project_dir = self.newdir(dir, self.name)
self.project_dir = project_dir
# Paths to Titanium assets that need to be linked into eclipse structure
self.config['ti_tiapp_xml'] = os.path.join(project_dir, 'tiapp.xml')
self.tiapp = TiAppXML(self.config['ti_tiapp_xml'])
resource_dir = os.path.join(project_dir, 'Resources')
self.config['ti_resources_dir'] = resource_dir
json_contents = open(os.path.join(self.ti_sdk_dir, 'android', 'dependency.json')).read()
depends_map = simplejson.loads(json_contents)
runtime = depends_map['runtimes']['defaultRuntime']
if self.tiapp.has_app_property("ti.android.runtime"):
requested_runtime = self.tiapp.get_app_property("ti.android.runtime")
if requested_runtime == "rhino" or requested_runtime == "v8":
runtime = requested_runtime
else:
print "[ERROR] invalid runtime \"" + requested_runtime + "\" requested, must be 'v8' or 'rhino'"
sys.exit(1);
app_build_dir = self.newdir(project_dir, 'build')
app_dir = self.newdir(app_build_dir, 'android')
#if os.path.exists(os.path.join(app_dir,'bin')):
# shutil.rmtree(os.path.join(app_dir,'bin'))
if os.path.exists(os.path.join(app_dir,'src')):
shutil.rmtree(os.path.join(app_dir,'src'))
if os.path.exists(os.path.join(app_dir,'res')):
shutil.rmtree(os.path.join(app_dir,'res'))
app_bin_dir = self.newdir(app_dir, 'bin')
app_lib_dir = self.newdir(app_dir, 'lib')
app_src_dir = self.newdir(app_dir, 'src')
app_res_dir = self.newdir(app_dir, 'res')
app_gen_dir = self.newdir(app_dir, 'gen')
app_bin_classes_dir = self.newdir(app_bin_dir, 'classes')
app_res_drawable_dir = self.newdir(app_res_dir, 'drawable')
app_assets_dir = self.newdir(app_dir, 'assets')
app_package_dir = self.newdir(app_gen_dir, *self.id.split('.'))
app_bin_assets_dir = self.newdir(app_bin_dir, 'assets')
self.build_app_info(project_dir)
self.build_modules_info(resource_dir, app_bin_dir, include_all_ti_modules=include_all_ti_modules)
# Create android source
self.render(this_dir, 'AppInfo.java', app_package_dir, self.config['classname'] + 'AppInfo.java',
app_properties = self.app_properties, app_info = self.app_info)
self.render(this_dir, 'AndroidManifest.xml', app_dir, 'AndroidManifest.xml')
self.render(this_dir, 'App.java', app_package_dir, self.config['classname'] + 'Application.java',
app_modules = self.app_modules, custom_modules = self.custom_modules, runtime = runtime)
self.render(this_dir, 'Activity.java', app_package_dir, self.config['classname'] + 'Activity.java')
self.generate_activities(app_package_dir)
self.generate_services(app_package_dir)
self.render(this_dir, 'classpath', app_dir, '.classpath')
self.render(this_dir, 'project', app_dir, '.project')
self.render(this_dir, 'default.properties', app_dir, 'default.properties')
print "[TRACE] Generating app.json"
f = None
try:
f = open(os.path.join(app_bin_assets_dir, "app.json"), "w")
f.write(simplejson.dumps({"app_modules":self.app_modules}))
finally:
if f is not None:
f.close()
# Don't override a pre-existing .gitignore in case users have their own preferences
# for what should be in it. (LH #2446)
if not os.path.exists(os.path.join(app_dir, '.gitignore')):
self.render(this_dir, 'gitignore', app_dir, '.gitignore')
else:
print "[TRACE] Skipping copying gitignore -> .gitignore because already exists"
android_project_resources = os.path.join(project_dir,'Resources','android')
if build_time==False and os.path.exists(android_project_resources):
shutil.rmtree(android_project_resources)
if not os.path.exists(android_project_resources):
copy_resources(os.path.join(baseapp_templates, "android","resources"), android_project_resources)
if __name__ == '__main__':
# this is for testing only for the time being
if len(sys.argv) != 6 or sys.argv[1]=='--help':
print "Usage: %s <name> <id> <directory> <android_sdk> <titanium_sdk_dir>" % os.path.basename(sys.argv[0])
sys.exit(1)
sdk = AndroidSDK(sys.argv[4])
android = Android(sys.argv[1], sys.argv[2], sdk, None, 'java', sys.argv[5])
android.create(sys.argv[3])
|
appcelerator/titanium_mobile_tooling
|
scripts/android/android.py
|
Python
|
apache-2.0
| 14,251
|
[
"VisIt"
] |
3aaa38262de212ad4fdec28e31ebbbd68084a45d950b976bbc1022b45b53f853
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PTransform and descendants.
A PTransform is an object describing (not executing) a computation. The actual
execution semantics for a transform is captured by a runner object. A transform
object always belongs to a pipeline object.
A PTransform derived class needs to define the expand() method that describes
how one or more PValues are created by the transform.
The module defines a few standard transforms: FlatMap (parallel do),
GroupByKey (group by key), etc. Note that the expand() methods for these
classes contain code that will add nodes to the processing graph associated
with a pipeline.
As support for the FlatMap transform, the module also defines a DoFn
class and wrapper class that allows lambda functions to be used as
FlatMap processing functions.
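For example (an illustrative sketch; it assumes the top-level apache_beam package re-exports
Pipeline, Create and FlatMap, and that the direct runner is available locally), a lambda may be
used directly as the processing function of a FlatMap:
  import apache_beam as beam
  p = beam.Pipeline('DirectRunner')
  words = (p
           | beam.Create(['a b', 'c'])
           | beam.FlatMap(lambda line: line.split()))
  p.run()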
"""
from __future__ import absolute_import
import copy
import inspect
import operator
import os
import sys
from apache_beam import error
from apache_beam import pvalue
from apache_beam import typehints
from apache_beam.internal import pickler
from apache_beam.internal import util
from apache_beam.transforms.display import HasDisplayData
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import getcallargs_forhints
from apache_beam.typehints import TypeCheckError
from apache_beam.typehints import validate_composite_type_param
from apache_beam.typehints import WithTypeHints
from apache_beam.typehints.trivial_inference import instance_to_type
class _PValueishTransform(object):
"""Visitor for PValueish objects.
  A PValueish is a PValue, or a list, tuple, or dict of PValueish objects.
  This visits a PValueish, constructing a (possibly mutated) copy.
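  For example (illustrative), the default visit() dispatches on the node's class name, so
    _PValueishTransform().visit({'a': [1, 2], 'b': (3,)})
  returns a structurally equal copy, {'a': [1, 2], 'b': (3,)}, with leaf values passed through
  unchanged.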
"""
def visit(self, node, *args):
return getattr(
self,
'visit_' + node.__class__.__name__,
lambda x, *args: x)(node, *args)
def visit_list(self, node, *args):
return [self.visit(x, *args) for x in node]
def visit_tuple(self, node, *args):
return tuple(self.visit(x, *args) for x in node)
def visit_dict(self, node, *args):
return {key: self.visit(value, *args) for (key, value) in node.items()}
class _SetInputPValues(_PValueishTransform):
def visit(self, node, replacements):
if id(node) in replacements:
return replacements[id(node)]
else:
return super(_SetInputPValues, self).visit(node, replacements)
class _MaterializedDoOutputsTuple(pvalue.DoOutputsTuple):
def __init__(self, deferred, pvalue_cache):
super(_MaterializedDoOutputsTuple, self).__init__(
None, None, deferred._tags, deferred._main_tag)
self._deferred = deferred
self._pvalue_cache = pvalue_cache
def __getitem__(self, tag):
return self._pvalue_cache.get_unwindowed_pvalue(self._deferred[tag])
class _MaterializePValues(_PValueishTransform):
def __init__(self, pvalue_cache):
self._pvalue_cache = pvalue_cache
def visit(self, node):
if isinstance(node, pvalue.PValue):
return self._pvalue_cache.get_unwindowed_pvalue(node)
elif isinstance(node, pvalue.DoOutputsTuple):
return _MaterializedDoOutputsTuple(node, self._pvalue_cache)
else:
return super(_MaterializePValues, self).visit(node)
class GetPValues(_PValueishTransform):
def visit(self, node, pvalues=None):
if pvalues is None:
pvalues = []
self.visit(node, pvalues)
return pvalues
elif isinstance(node, (pvalue.PValue, pvalue.DoOutputsTuple)):
pvalues.append(node)
else:
super(GetPValues, self).visit(node, pvalues)
class ZipPValues(_PValueishTransform):
"""Pairs each PValue in a pvalueish with a value in a parallel out sibling.
Sibling should have the same nested structure as pvalueish. Leaves in
sibling are expanded across nested pvalueish lists, tuples, and dicts.
For example
ZipPValues().visit({'a': pc1, 'b': (pc2, pc3)},
                       {'a': 'A', 'b': 'B'})
will return
[('a', pc1, 'A'), ('b', pc2, 'B'), ('b', pc3, 'B')]
"""
def visit(self, pvalueish, sibling, pairs=None, context=None):
if pairs is None:
pairs = []
self.visit(pvalueish, sibling, pairs, context)
return pairs
elif isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
pairs.append((context, pvalueish, sibling))
else:
super(ZipPValues, self).visit(pvalueish, sibling, pairs, context)
def visit_list(self, pvalueish, sibling, pairs, context):
if isinstance(sibling, (list, tuple)):
for ix, (p, s) in enumerate(zip(
pvalueish, list(sibling) + [None] * len(pvalueish))):
self.visit(p, s, pairs, 'position %s' % ix)
else:
for p in pvalueish:
self.visit(p, sibling, pairs, context)
def visit_tuple(self, pvalueish, sibling, pairs, context):
self.visit_list(pvalueish, sibling, pairs, context)
def visit_dict(self, pvalueish, sibling, pairs, context):
if isinstance(sibling, dict):
for key, p in pvalueish.items():
self.visit(p, sibling.get(key), pairs, key)
else:
for p in pvalueish.values():
self.visit(p, sibling, pairs, context)
class PTransform(WithTypeHints, HasDisplayData):
"""A transform object used to modify one or more PCollections.
Subclasses must define an expand() method that will be used when the transform
is applied to some arguments. Typical usage pattern will be:
input | CustomTransform(...)
The expand() method of the CustomTransform object passed in will be called
with input as an argument.
"""
# By default, transforms don't have any side inputs.
side_inputs = ()
# Used for nullary transforms.
pipeline = None
# Default is unset.
_user_label = None
def __init__(self, label=None):
super(PTransform, self).__init__()
self.label = label
@property
def label(self):
return self._user_label or self.default_label()
@label.setter
def label(self, value):
self._user_label = value
def default_label(self):
return self.__class__.__name__
def with_input_types(self, input_type_hint):
"""Annotates the input type of a PTransform with a type-hint.
Args:
input_type_hint: An instance of an allowed built-in type, a custom class,
or an instance of a typehints.TypeConstraint.
Raises:
TypeError: If 'type_hint' is not a valid type-hint. See
typehints.validate_composite_type_param for further details.
Returns:
A reference to the instance of this particular PTransform object. This
allows chaining type-hinting related methods.
"""
validate_composite_type_param(input_type_hint,
'Type hints for a PTransform')
return super(PTransform, self).with_input_types(input_type_hint)
def with_output_types(self, type_hint):
"""Annotates the output type of a PTransform with a type-hint.
Args:
type_hint: An instance of an allowed built-in type, a custom class, or a
typehints.TypeConstraint.
Raises:
TypeError: If 'type_hint' is not a valid type-hint. See
typehints.validate_composite_type_param for further details.
Returns:
A reference to the instance of this particular PTransform object. This
allows chaining type-hinting related methods.
"""
validate_composite_type_param(type_hint, 'Type hints for a PTransform')
return super(PTransform, self).with_output_types(type_hint)
def type_check_inputs(self, pvalueish):
self.type_check_inputs_or_outputs(pvalueish, 'input')
def infer_output_type(self, unused_input_type):
return self.get_type_hints().simple_output_type(self.label) or typehints.Any
def type_check_outputs(self, pvalueish):
self.type_check_inputs_or_outputs(pvalueish, 'output')
def type_check_inputs_or_outputs(self, pvalueish, input_or_output):
hints = getattr(self.get_type_hints(), input_or_output + '_types')
if not hints:
return
arg_hints, kwarg_hints = hints
if arg_hints and kwarg_hints:
raise TypeCheckError(
'PTransform cannot have both positional and keyword type hints '
'without overriding %s._type_check_%s()' % (
self.__class__, input_or_output))
root_hint = (
arg_hints[0] if len(arg_hints) == 1 else arg_hints or kwarg_hints)
for context, pvalue_, hint in ZipPValues().visit(pvalueish, root_hint):
if pvalue_.element_type is None:
# TODO(robertwb): It's a bug that we ever get here. (typecheck)
continue
if hint and not typehints.is_consistent_with(pvalue_.element_type, hint):
at_context = ' %s %s' % (input_or_output, context) if context else ''
raise TypeCheckError(
'%s type hint violation at %s%s: expected %s, got %s' % (
input_or_output.title(), self.label, at_context, hint,
pvalue_.element_type))
def _infer_output_coder(self, input_type=None, input_coder=None):
"""Returns the output coder to use for output of this transform.
Note: this API is experimental and is subject to change; please do not rely
on behavior induced by this method.
The Coder returned here should not be wrapped in a WindowedValueCoder
wrapper.
Args:
input_type: An instance of an allowed built-in type, a custom class, or a
typehints.TypeConstraint for the input type, or None if not available.
input_coder: Coder object for encoding input to this PTransform, or None
if not available.
Returns:
Coder object for encoding output of this PTransform or None if unknown.
"""
# TODO(ccy): further refine this API.
return None
def clone(self, new_label):
"""Clones the current transform instance under a new label."""
transform = copy.copy(self)
transform.label = new_label
return transform
def expand(self, input_or_inputs):
raise NotImplementedError
def __str__(self):
return '<%s>' % self._str_internal()
def __repr__(self):
return '<%s at %s>' % (self._str_internal(), hex(id(self)))
def _str_internal(self):
return '%s(PTransform)%s%s%s' % (
self.__class__.__name__,
' label=[%s]' % self.label if (hasattr(self, 'label') and
self.label) else '',
' inputs=%s' % str(self.inputs) if (hasattr(self, 'inputs') and
self.inputs) else '',
' side_inputs=%s' % str(self.side_inputs) if self.side_inputs else '')
def _check_pcollection(self, pcoll):
if not isinstance(pcoll, pvalue.PCollection):
raise error.TransformError('Expecting a PCollection argument.')
if not pcoll.pipeline:
raise error.TransformError('PCollection not part of a pipeline.')
def get_windowing(self, inputs):
"""Returns the window function to be associated with transform's output.
By default most transforms just return the windowing function associated
with the input PCollection (or the first input if several).
"""
# TODO(robertwb): Assert all input WindowFns compatible.
return inputs[0].windowing
def __rrshift__(self, label):
return _NamedPTransform(self, label)
def __or__(self, right):
"""Used to compose PTransforms, e.g., ptransform1 | ptransform2."""
if isinstance(right, PTransform):
return ChainedPTransform(self, right)
else:
return NotImplemented
def __ror__(self, left, label=None):
"""Used to apply this PTransform to non-PValues, e.g., a tuple."""
pvalueish, pvalues = self._extract_input_pvalues(left)
pipelines = [v.pipeline for v in pvalues if isinstance(v, pvalue.PValue)]
if pvalues and not pipelines:
deferred = False
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam import pipeline
from apache_beam.utils.pipeline_options import PipelineOptions
# pylint: enable=wrong-import-order, wrong-import-position
p = pipeline.Pipeline(
'DirectRunner', PipelineOptions(sys.argv))
else:
if not pipelines:
if self.pipeline is not None:
p = self.pipeline
else:
raise ValueError('"%s" requires a pipeline to be specified '
'as there are no deferred inputs.'% self.label)
else:
p = self.pipeline or pipelines[0]
for pp in pipelines:
if p != pp:
raise ValueError(
'Mixing value from different pipelines not allowed.')
deferred = not getattr(p.runner, 'is_eager', False)
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import Create
# pylint: enable=wrong-import-order, wrong-import-position
replacements = {id(v): p | 'CreatePInput%s' % ix >> Create(v)
for ix, v in enumerate(pvalues)
if not isinstance(v, pvalue.PValue) and v is not None}
pvalueish = _SetInputPValues().visit(pvalueish, replacements)
self.pipeline = p
result = p.apply(self, pvalueish, label)
if deferred:
return result
else:
# Get a reference to the runners internal cache, otherwise runner may
# clean it after run.
cache = p.runner.cache
p.run().wait_until_finish()
return _MaterializePValues(cache).visit(result)
def _extract_input_pvalues(self, pvalueish):
"""Extract all the pvalues contained in the input pvalueish.
Returns pvalueish as well as the flat inputs list as the input may have to
be copied as inspection may be destructive.
By default, recursively extracts tuple components and dict values.
    Generally only needs to be overridden for multi-input PTransforms.
"""
# pylint: disable=wrong-import-order
from apache_beam import pipeline
# pylint: enable=wrong-import-order
if isinstance(pvalueish, pipeline.Pipeline):
pvalueish = pvalue.PBegin(pvalueish)
def _dict_tuple_leaves(pvalueish):
if isinstance(pvalueish, tuple):
for a in pvalueish:
for p in _dict_tuple_leaves(a):
yield p
elif isinstance(pvalueish, dict):
for a in pvalueish.values():
for p in _dict_tuple_leaves(a):
yield p
else:
yield pvalueish
return pvalueish, tuple(_dict_tuple_leaves(pvalueish))
class ChainedPTransform(PTransform):
def __init__(self, *parts):
super(ChainedPTransform, self).__init__(label=self._chain_label(parts))
self._parts = parts
def _chain_label(self, parts):
return '|'.join(p.label for p in parts)
def __or__(self, right):
if isinstance(right, PTransform):
# Create a flat list rather than a nested tree of composite
# transforms for better monitoring, etc.
return ChainedPTransform(*(self._parts + (right,)))
else:
return NotImplemented
def expand(self, pval):
return reduce(operator.or_, self._parts, pval)
class PTransformWithSideInputs(PTransform):
"""A superclass for any PTransform (e.g. FlatMap or Combine)
invoking user code.
PTransforms like FlatMap invoke user-supplied code in some kind of
package (e.g. a DoFn) and optionally provide arguments and side inputs
to that code. This internal-use-only class contains common functionality
for PTransforms that fit this model.
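  For example (illustrative sketch; AsSingleton is assumed to come from apache_beam.pvalue and
  Create/FlatMap from the core transforms):
    offsets = p | 'offset' >> Create([10])
    shifted = main | FlatMap(lambda x, delta: [x + delta], AsSingleton(offsets))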
"""
def __init__(self, fn, *args, **kwargs):
if isinstance(fn, type) and issubclass(fn, typehints.WithTypeHints):
# Don't treat Fn class objects as callables.
raise ValueError('Use %s() not %s.' % (fn.__name__, fn.__name__))
self.fn = self.make_fn(fn)
# Now that we figure out the label, initialize the super-class.
super(PTransformWithSideInputs, self).__init__()
if (any([isinstance(v, pvalue.PCollection) for v in args]) or
any([isinstance(v, pvalue.PCollection) for v in kwargs.itervalues()])):
raise error.SideInputError(
'PCollection used directly as side input argument. Specify '
'AsIter(pcollection) or AsSingleton(pcollection) to indicate how the '
'PCollection is to be used.')
self.args, self.kwargs, self.side_inputs = util.remove_objects_from_args(
args, kwargs, pvalue.PCollectionView)
self.raw_side_inputs = args, kwargs
# Prevent name collisions with fns of the form '<function <lambda> at ...>'
self._cached_fn = self.fn
# Ensure fn and side inputs are picklable for remote execution.
self.fn = pickler.loads(pickler.dumps(self.fn))
self.args = pickler.loads(pickler.dumps(self.args))
self.kwargs = pickler.loads(pickler.dumps(self.kwargs))
# For type hints, because loads(dumps(class)) != class.
self.fn = self._cached_fn
def with_input_types(
self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):
"""Annotates the types of main inputs and side inputs for the PTransform.
Args:
input_type_hint: An instance of an allowed built-in type, a custom class,
or an instance of a typehints.TypeConstraint.
*side_inputs_arg_hints: A variable length argument composed of
of an allowed built-in type, a custom class, or a
typehints.TypeConstraint.
**side_input_kwarg_hints: A dictionary argument composed of
of an allowed built-in type, a custom class, or a
typehints.TypeConstraint.
Example of annotating the types of side-inputs:
FlatMap().with_input_types(int, int, bool)
Raises:
TypeError: If 'type_hint' is not a valid type-hint. See
typehints.validate_composite_type_param for further details.
Returns:
A reference to the instance of this particular PTransform object. This
allows chaining type-hinting related methods.
"""
super(PTransformWithSideInputs, self).with_input_types(input_type_hint)
for si in side_inputs_arg_hints:
validate_composite_type_param(si, 'Type hints for a PTransform')
for si in side_input_kwarg_hints.values():
validate_composite_type_param(si, 'Type hints for a PTransform')
self.side_inputs_types = side_inputs_arg_hints
return WithTypeHints.with_input_types(
self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)
def type_check_inputs(self, pvalueish):
type_hints = self.get_type_hints().input_types
if type_hints:
args, kwargs = self.raw_side_inputs
def element_type(side_input):
if isinstance(side_input, pvalue.PCollectionView):
return side_input.element_type
else:
return instance_to_type(side_input)
arg_types = [pvalueish.element_type] + [element_type(v) for v in args]
kwargs_types = {k: element_type(v) for (k, v) in kwargs.items()}
argspec_fn = self.process_argspec_fn()
bindings = getcallargs_forhints(argspec_fn, *arg_types, **kwargs_types)
hints = getcallargs_forhints(argspec_fn, *type_hints[0], **type_hints[1])
for arg, hint in hints.items():
if arg.startswith('%unknown%'):
continue
if hint is None:
continue
if not typehints.is_consistent_with(
bindings.get(arg, typehints.Any), hint):
raise typehints.TypeCheckError(
'Type hint violation for \'%s\': requires %s but got %s for %s'
% (self.label, hint, bindings[arg], arg))
def process_argspec_fn(self):
"""Returns an argspec of the function actually consuming the data.
"""
raise NotImplementedError
def make_fn(self, fn):
    # TODO(silviuc): Add comment describing that this is meant to be overridden
# by methods detecting callables and wrapping them in DoFns.
return fn
def default_label(self):
return '%s(%s)' % (self.__class__.__name__, self.fn.default_label())
class CallablePTransform(PTransform):
"""A class wrapper for a function-based transform."""
def __init__(self, fn):
# pylint: disable=super-init-not-called
# This is a helper class for a function decorator. Only when the class
# is called (and __call__ invoked) we will have all the information
# needed to initialize the super class.
self.fn = fn
self._args = ()
self._kwargs = {}
def display_data(self):
res = {'fn': (self.fn.__name__
if hasattr(self.fn, '__name__')
else self.fn.__class__),
'args': DisplayDataItem(str(self._args)).drop_if_default('()'),
'kwargs': DisplayDataItem(str(self._kwargs)).drop_if_default('{}')}
return res
def __call__(self, *args, **kwargs):
super(CallablePTransform, self).__init__()
self._args = args
self._kwargs = kwargs
return self
def expand(self, pcoll):
# Since the PTransform will be implemented entirely as a function
# (once called), we need to pass through any type-hinting information that
# may have been annotated via the .with_input_types() and
# .with_output_types() methods.
kwargs = dict(self._kwargs)
args = tuple(self._args)
try:
if 'type_hints' in inspect.getargspec(self.fn).args:
args = (self.get_type_hints(),) + args
except TypeError:
# Might not be a function.
pass
return self.fn(pcoll, *args, **kwargs)
def default_label(self):
if self._args:
return '%s(%s)' % (
label_from_callable(self.fn), label_from_callable(self._args[0]))
else:
return label_from_callable(self.fn)
def ptransform_fn(fn):
"""A decorator for a function-based PTransform.
Args:
fn: A function implementing a custom PTransform.
Returns:
A CallablePTransform instance wrapping the function-based PTransform.
This wrapper provides an alternative, simpler way to define a PTransform.
The standard method is to subclass from PTransform and override the expand()
method. An equivalent effect can be obtained by defining a function that
  takes an input PCollection and additional optional arguments and returns a
resulting PCollection. For example::
@ptransform_fn
def CustomMapper(pcoll, mapfn):
return pcoll | ParDo(mapfn)
The equivalent approach using PTransform subclassing::
class CustomMapper(PTransform):
def __init__(self, mapfn):
super(CustomMapper, self).__init__()
self.mapfn = mapfn
def expand(self, pcoll):
return pcoll | ParDo(self.mapfn)
With either method the custom PTransform can be used in pipelines as if
it were one of the "native" PTransforms::
result_pcoll = input_pcoll | 'label' >> CustomMapper(somefn)
Note that for both solutions the underlying implementation of the pipe
operator (i.e., `|`) will inject the pcoll argument in its proper place
(first argument if no label was specified and second argument otherwise).
"""
return CallablePTransform(fn)
def label_from_callable(fn):
if hasattr(fn, 'default_label'):
return fn.default_label()
elif hasattr(fn, '__name__'):
if fn.__name__ == '<lambda>':
return '<lambda at %s:%s>' % (
os.path.basename(fn.func_code.co_filename),
fn.func_code.co_firstlineno)
else:
return fn.__name__
else:
return str(fn)
class _NamedPTransform(PTransform):
def __init__(self, transform, label):
super(_NamedPTransform, self).__init__(label)
self.transform = transform
def __ror__(self, pvalueish):
return self.transform.__ror__(pvalueish, self.label)
def expand(self, pvalue):
raise RuntimeError("Should never be expanded directly.")
|
jasonkuster/incubator-beam
|
sdks/python/apache_beam/transforms/ptransform.py
|
Python
|
apache-2.0
| 24,237
|
[
"VisIt"
] |
ead86e7ea09bfd223da32b4606822850cc2eabfea47200f363f8ca313d38cc20
|
#!/usr/bin/env python
__author__ = "waroquiers"
import unittest
import numpy as np
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.utils.math_utils import (
_cartesian_product,
cosinus_step,
divisors,
get_center_of_arc,
get_linearly_independent_vectors,
power3_step,
powern_parts_step,
prime_factors,
scale_and_clamp,
smootherstep,
smoothstep,
)
class MathUtilsTest(PymatgenTest):
def test_list_cartesian_product(self):
list_of_lists = [[0, 1], [2, 5, 4], [5]]
self.assertEqual(
_cartesian_product(lists=list_of_lists),
[[0, 2, 5], [1, 2, 5], [0, 5, 5], [1, 5, 5], [0, 4, 5], [1, 4, 5]],
)
list_of_lists = [[0, 1], [2, 5, 4], []]
self.assertEqual(_cartesian_product(lists=list_of_lists), [])
list_of_lists = [[1], [3], [2]]
self.assertEqual(_cartesian_product(lists=list_of_lists), [[1, 3, 2]])
list_of_lists = [[7]]
self.assertEqual(_cartesian_product(lists=list_of_lists), [[7]])
def test_math_utils(self):
ff = prime_factors(250)
self.assertEqual(ff, [5, 5, 5, 2])
div = divisors(560)
self.assertEqual(
div,
[
1,
2,
4,
5,
7,
8,
10,
14,
16,
20,
28,
35,
40,
56,
70,
80,
112,
140,
280,
560,
],
)
center = get_center_of_arc([0.0, 0.0], [1.0, 0.0], 0.5)
self.assertEqual(center, (0.5, 0.0))
def test_linearly_independent_vectors(self):
v1 = np.array([1, 0, 0])
v2 = np.array([0, 1, 0])
v3 = np.array([0, 0, 1])
v4 = np.array([-1, 0, 0])
v5 = np.array([1, 1, 0])
independent_vectors = get_linearly_independent_vectors([v1, v2, v3])
self.assertEqual(len(independent_vectors), 3)
independent_vectors = get_linearly_independent_vectors([v1, v2, v4])
self.assertEqual(len(independent_vectors), 2)
independent_vectors = get_linearly_independent_vectors([v1, v2, v5])
self.assertEqual(len(independent_vectors), 2)
independent_vectors = get_linearly_independent_vectors([v1, v2, v3, v4, v5])
self.assertEqual(len(independent_vectors), 3)
def test_scale_and_clamp(self):
edge0 = 7.0
edge1 = 11.0
clamp0 = 0.0
clamp1 = 1.0
vals = np.linspace(5.0, 12.0, num=8)
self.assertEqual(
scale_and_clamp(vals, edge0, edge1, clamp0, clamp1).tolist(),
[0.0, 0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0],
)
def test_smoothstep(self):
vals = np.linspace(5.0, 12.0, num=8)
self.assertEqual(smoothstep(vals, edges=[0.0, 1.0]).tolist(), [1.0] * 8)
self.assertEqual(
smoothstep(vals, edges=[7.0, 11.0]).tolist(),
[0.0, 0.0, 0.0, 0.15625, 0.5, 0.84375, 1.0, 1.0],
)
def test_smootherstep(self):
vals = np.linspace(5.0, 12.0, num=8)
self.assertEqual(smootherstep(vals, edges=[0.0, 1.0]).tolist(), [1.0] * 8)
self.assertEqual(
smootherstep(vals, edges=[7.0, 11.0]).tolist(),
[0.0, 0.0, 0.0, 0.103515625, 0.5, 0.896484375, 1.0, 1.0],
)
def test_power3_step(self):
vals = np.linspace(5.0, 12.0, num=8)
self.assertEqual(power3_step(vals, edges=[0.0, 1.0]).tolist(), [1.0] * 8)
self.assertEqual(
power3_step(vals, edges=[7.0, 11.0]).tolist(),
[0.0, 0.0, 0.0, 0.15625, 0.5, 0.84375, 1.0, 1.0],
)
def test_cosinus_step(self):
vals = np.linspace(5.0, 12.0, num=8)
self.assertEqual(cosinus_step(vals, edges=[0.0, 1.0]).tolist(), [1.0] * 8)
self.assertArrayAlmostEqual(
cosinus_step(vals, edges=[7.0, 11.0]).tolist(),
[0.0, 0.0, 0.0, 0.14644660940672616, 0.5, 0.8535533905932737, 1.0, 1.0],
5,
)
def test_powern_parts_step(self):
vals = np.linspace(5.0, 12.0, num=8)
self.assertEqual(powern_parts_step(vals, edges=[0.0, 1.0], nn=2).tolist(), [1.0] * 8)
self.assertEqual(powern_parts_step(vals, edges=[0.0, 1.0], nn=3).tolist(), [1.0] * 8)
self.assertEqual(powern_parts_step(vals, edges=[0.0, 1.0], nn=4).tolist(), [1.0] * 8)
self.assertEqual(
powern_parts_step(vals, edges=[7.0, 11.0], nn=2).tolist(),
[0.0, 0.0, 0.0, 0.125, 0.5, 0.875, 1.0, 1.0],
)
self.assertEqual(
powern_parts_step(vals, edges=[7.0, 11.0], nn=3).tolist(),
[0.0, 0.0, 0.0, 0.0625, 0.5, 0.9375, 1.0, 1.0],
)
self.assertEqual(
powern_parts_step(vals, edges=[7.0, 11.0], nn=4).tolist(),
[0.0, 0.0, 0.0, 0.03125, 0.5, 0.96875, 1.0, 1.0],
)
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/chemenv/utils/tests/test_math_utils.py
|
Python
|
mit
| 5,110
|
[
"pymatgen"
] |
b61d969685843b2a112cbd61b35eedbeb0bca13090d8ad4290965cc7bc8cbc4c
|
# Placeholder because util moved
# Remove this in version 1.0
from __future__ import absolute_import
import warnings
with warnings.catch_warnings():
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(("util has moved to MDAnalysis.lib.util "
"and will be removed from here in release 1.0"),
DeprecationWarning)
from ..lib.util import *
|
kain88-de/mdanalysis
|
package/MDAnalysis/core/util.py
|
Python
|
gpl-2.0
| 398
|
[
"MDAnalysis"
] |
ca1387a1088ed4df3f16198a31402f63bf0df7835aa0a71d67ed73f8b9c1c392
|
# tronutils: Utilities library for a TronBot for the Google AI Challenge 2010
# Copyright (C) 2010 Corey Abshire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys, random, tron, dijkstra, brandes
from collections import deque
#_____________________________________________________________________
# Constants and Enumerations
#
# Dictionary for translating direction codes to names.
DIR_NAMES = { tron.NORTH : 'NORTH' ,
tron.SOUTH : 'SOUTH' ,
tron.EAST : 'EAST' ,
tron.WEST : 'WEST' }
# Dictionary for translating direction codes to abbreviations.
DIR_ABBRS = dict(zip(DIR_NAMES.keys(), [s[0] for s in DIR_NAMES.values()]))
#_____________________________________________________________________
# Board File I/O
#
def read_board(filename):
"Read a board from a map file."
f = open(filename)
width,height = [int(s) for s in f.readline().strip().split()]
board = [s[:width] for s in f.readlines()]
f.close()
return tron.Board(width, height, board)
def write_board(board, filename):
"Write the given board out to a file in the same format as the maps."
f = open(filename, 'w')
f.write('%d %d\n' % (board.width, board.height))
for line in board.board:
f.write('%s\n' % line)
f.close()
def print_board(board):
"Print the board to standard out in the same format as the maps."
print board.width, board.height
for line in board.board:
print line
def list_files(path):
"Lists all the files in path, including path as the prefix."
return [path + filename for filename in os.listdir(path)]
#_____________________________________________________________________
# Board Manipulation and Querying
#
def valid_coords(board, (y,x)):
"Are the coordinates within the board dimensions?"
return 0 <= y < board.height \
and 0 <= x < board.width
def tile_is_a(kind_of):
"Return a tile matcher that checks if the tile at coords is kind_of."
def fn(board, coords):
if valid_coords(board, coords):
return board[coords] == kind_of
return fn
def invert(predicate):
"Create the logical inverse of the given predicate."
return lambda *args: not predicate(*args)
is_wall = tile_is_a(tron.WALL)
is_floor = tile_is_a(tron.FLOOR)
is_nonwall = invert(tile_is_a(tron.WALL))
def tiles_matching(board, predicate):
"Collect all tiles on the board matching fn."
tiles = []
for y in xrange(board.height):
for x in xrange(board.width):
if predicate(board, (y,x)):
tiles.append((y,x))
return tiles
def adjacent(board, coords, predicate):
"Find all tiles on board adjacent to coords matching the predicate."
return [a for a in board.adjacent(coords) if predicate(board, a)]
def set_char(s, i, c):
"Return a copy of s with the character at index i replaced with c."
return s[:i] + c + s[i+1:]
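# A quick illustrative example (not part of the original file): replacing the
# middle character of a short row string.
#   set_char('# #', 1, 'X')  ->  '#X#'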
def apply_move(board, player, move):
"Create a copy of board where move has been applied to player."
lines = [line for line in board.board] # shallow copy
(y1,x1) = board.find(player)
(y2,x2) = board.rel(move, (y1,x1))
lines[y1] = set_char(lines[y1], x1, tron.WALL)
lines[y2] = set_char(lines[y2], x2, player)
return tron.Board(board.width, board.height, lines)
def apply_move_fn(board, move_fn, player=tron.ME, max_len=sys.maxint):
"Apply move_fn repeatedly to build a path on the board."
path = []
while not is_game_over(board) and len(path) < max_len:
move = move_fn(board)
board = apply_move(board, player, move)
coords = board.me()
path.append(coords)
return path
#_____________________________________________________________________
# Board Game Logic
#
def is_game_over(board):
"Determine whether this board is at an end game state."
try:
return not adjacent(board, board.me(), is_floor) \
or not adjacent(board, board.them(), is_floor)
except KeyError:
return True # one player disappears if they crash into each other
def win_lose_or_draw(board, player):
"Did player on board is a win (1), lose (-1), or draw (-0.5)."
try:
me = board.me()
them = board.them()
except KeyError:
return -0.5 # one player disappears if they crash into each other
me_stuck = not adjacent(board, me, is_floor)
them_stuck = not adjacent(board, them, is_floor)
if me_stuck and them_stuck:
return -0.5
elif me_stuck or them_stuck:
if player == tron.ME:
return me_stuck and -1 or 1
else:
return me_stuck and 1 or -1
else:
return -0.5
def opponent(player):
"Determine the opposite player."
if player == tron.ME:
return tron.THEM
else:
return tron.ME
def move_made((y1,x1),(y2,x2)):
"Return the move needed to get from a to b. Assumes adjacency."
if y2 < y1: return tron.NORTH
elif y2 > y1: return tron.SOUTH
elif x2 > x1: return tron.EAST
else : return tron.WEST
def distance((y1, x1), (y2, x2)):
"Compute the distance in moves between two tiles."
return abs(x2 - x1) + abs(y2 - y1)
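# Illustrative sanity checks (not part of the original file) on a hypothetical
# pair of tiles, using (row, column) coordinates:
#   distance((2, 3), (5, 7))   == 3 + 4 == 7
#   move_made((2, 3), (1, 3))  == tron.NORTH   (row decreased)
#   move_made((2, 3), (2, 4))  == tron.EAST    (column increased)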
#_____________________________________________________________________
# Board Analysis
#
def points_around(board, coords, predicate=is_floor):
"All the open spaces around coords."
# http://mail.python.org/pipermail/image-sig/2005-September/003559.html
count = 0
edge = [coords]
seen = set()
while edge:
newedge = []
for tile in edge:
for adj in adjacent(board, tile, is_floor):
if adj not in seen:
count += 1
seen.add(adj)
newedge.append(adj)
edge = newedge
return seen
def count_around(board, coords, predicate=is_floor):
"Count of all spaces around coords."
return len(points_around(board, coords, predicate))
def anticipate(board, coords, pattern, num_moves):
pos = coords
i = 0; j = 0
while i < num_moves:
pos = board.rel(pattern[j], pos)
i += 1
j += 1
if j >= len(pattern):
j = 0
return pos
class Adjacent():
"Dictionary for adjacent tiles on a Tron board."
def __init__(self, board, test):
self.board = board
self.test = test
def __getitem__(self, coords):
return adjacent(self.board, coords, self.test)
def centrality(board):
"Compute betweenness centrality for the floor of a Tron board."
V = tiles_matching(board, is_floor)
A = Adjacent(board, is_floor)
return brandes.brandes(V, A)
#_____________________________________________________________________
# Shortest Path
#
class DijkstraNeighbors():
"Adapter for Dijkstra algorithm implementation. Dict of neighbors."
def __init__(self, neighbors):
self.neighbors = neighbors
def __iter__(self):
return self.neighbors.__iter__()
def __getitem__(self, coords):
return 1 # all neighbors are 1 square away in Tron
class DijkstraGraph():
"Adapter for Dijkstra algorithm implementation. Graph of tiles."
def __init__(self, board, test):
self.board = board
self.test = test
def __getitem__(self, coords):
return DijkstraNeighbors(adjacent(self.board, coords, self.test))
def shortest_path(board, start, end, test=is_nonwall):
"Return the shortest path between two points on the board."
return dijkstra.shortestPath(DijkstraGraph(board, test), start, end)
def moves_between(path):
"Number of moves it would take for two players to traverse the path."
# The path includes both the players tiles, so we just subtract
# those 2 from the length of the path to get the moves between.
return len(path) - 2
def dijkstra_map(board, start, end, test=is_nonwall):
"Run Dijkstra's algorithm and return the distance map."
d, p = dijkstra.Dijkstra(DijkstraGraph(board, test), start, end)
return d
#_____________________________________________________________________
# Depth First Search
#
def articulation_points(board, root):
"Find the points that if were filled would separate the board."
sys.setrecursionlimit(2500)
V = set(); A = Adjacent(board, is_floor)
L = {}; N = {}; c = [0]; P = {}; X = set()
def f(v):
V.add(v)
c[0] += 1
L[v] = N[v] = c[0]
for w in A[v]:
if w not in V:
P[w] = v
f(w)
if v != root and L[w] >= N[v]:
X.add(v)
L[v] = min(L[v], L[w])
else:
if v in P and P[v] != w:
L[v] = min(L[v], N[w])
f(root)
return X
def root_dfs(root, A, visited=None, preorder_process=lambda x: None):
"Given a starting vertex, root, do a depth-first search."
# see http://en.wikipedia.org/wiki/Depth-first_search python impl.
to_visit = []
if visited is None: visited = set()
to_visit.append(root)
while len(to_visit) != 0:
v = to_visit.pop()
if v not in visited:
visited.add(v)
preorder_process(v)
to_visit.extend(A[v])
def touching(t):
"Determine which player directions are touching (connected by floor)."
for c in t:
p = set(p for p,d in c)
if tron.ME in p and tron.THEM in p:
return True
return False
def dfs_count_around(board):
"Use DFS to count all the spaces on the board around either player."
N = [tron.ME, tron.THEM]
A = Adjacent(board, is_floor)
P = [board.me(), board.them()]
C = [{} for p in P]
T = []
remaining = set(A[P[0]] + A[P[1]])
while remaining:
u = remaining.pop()
V = set([])
root_dfs(u, A, V)
c = len(V)
t = []
for i in range(len(P)):
p = P[i]
for a in A[p]:
if a in V:
d = move_made(p, a)
C[i][d] = c
t.append((N[i],d))
if a in remaining:
remaining.remove(a)
T.append(t)
return C[0], C[1], T, touching(T)
def dfs(V,A):
# see CLRS (2nd) p. 541
WHITE, GRAY, BLACK = 0, 1, 2
init = lambda x: dict((u, x) for u in V)
color, pi, d, f = [init(x) for x in (WHITE, None, 0, 0)]
time = [0]
depth = [0]
max_depth = [0]
n = [0]
def visit(u):
depth[0] += 1
n[0] += 1
max_depth[0] = max(max_depth[0], depth[0])
color[u] = GRAY
time[0] += 1
d[u] = time[0]
for v in A[u]:
if color[v] == WHITE:
pi[v] = u
visit(v)
f[u] = time[0] = time[0] + 1
depth[0] -= 1
for u in V:
if color[u] == WHITE:
visit(u)
return d, f, pi, max_depth[0], n[0]
def depth_first_search(board):
"Run DFS on a Tron board. Return starts, finishes, preds, depth, numbering."
V = tiles_matching(board, is_floor)
A = Adjacent(board, is_floor)
return dfs(V, A)
def components(board, predicate=is_floor):
"Return the components on board."
A = Adjacent(board, predicate)
Va = tiles_matching(board, predicate)
    d, f, pi, _, _ = dfs(Va, A)  # dfs also returns max depth and node count
Vb = f.keys()
Vb.sort(key=lambda x: f[x])
    g, h, pi, _, _ = dfs(Vb, A)
return g,h,pi
#_____________________________________________________________________
# Environment Recognition
#
def find_walls(board):
"Find all the walls (contingous series of wall tiles)."
wall_tiles_remaining = set(tiles_matching(board, is_wall))
walls = []
while wall_tiles_remaining:
wall = set()
rest_of_wall = [wall_tiles_remaining.pop()]
while rest_of_wall:
another = rest_of_wall.pop()
wall.add(another)
adjacent_walls = adjacent(board, another, is_wall)
for x in adjacent_walls:
if x not in wall:
rest_of_wall.append(x)
if x in wall_tiles_remaining:
wall_tiles_remaining.remove(x)
walls.append(wall)
return walls
def heat_map(board, paths=20):
"Identify hotspots by counting coordinate hits on random paths."
points = [p for p in points_around(board, board.me())]
heat = {}
max_heat = 0
for v in points:
heat[v] = 0
for i in xrange(paths):
a = random.choice(points)
b = random.choice(points)
p = shortest_path(board, a, b, is_floor)
for v in p:
heat[v] += 1
if heat[v] > max_heat:
max_heat = heat[v]
return heat
def distance_map(board, coords):
"Find the distance to all floor tiles from coords."
seen = set(coords)
q = deque([coords])
d = { coords: 0 }
while q:
p = q.popleft()
for a in adjacent(board, p, is_floor):
if a not in seen:
seen.add(a)
q.append(a)
d[a] = d[p] + 1
return d
def same_distance(board, a, b):
"Return all points equidistant from a and b."
m = distance_map(board, a)
n = distance_map(board, b)
keys = set(m.keys()).intersection(set(n.keys()))
same = [k for k in keys if m[k] == n[k]]
same.sort(key=lambda k: m[k])
return same
|
jogo279/trobo
|
opponents/corey_abshire/tronutils.py
|
Python
|
bsd-2-clause
| 14,000
|
[
"VisIt"
] |
ba579f8fa1cdffc0f7af739249f72ee10e073215017e723878533f1b3b8cfef6
|
import struct, sys, zlib, StringIO, time
def get_block_bounds(filename):
"""Pre block starts
start 0-indexted, end 1-indexted
:param filename: filename
:type filename: string
:return: 0-index start and 1-index end
:rtype: array of arrays with the [start end] of each block
"""
bs = []
with open(filename,'rb') as inf:
while True:
bytes1 = inf.read(12)
if len(bytes1) < 12: break
bs.append([inf.tell()-12])
gzip_id1,gzip_id2,compression_method,flag,mtime,xfl,osval,xlen=struct.unpack('<BBBBIBBH',bytes1)
# ready to look in extra field
bytes2 = inf.read(xlen) # all the extra field stuff
s = StringIO.StringIO(bytes2)
obsslen = 0
blocksize = 0
while True:
v1 = s.read(4)
if len(v1) == 0:
break
if len(v1) < 4:
sys.stderr.write("lack header values ERROR\n")
return False
s1,s2,slen = struct.unpack('<BBH',v1)
if s1 == 66 and s2 == 67:
has_id = True
obsslen = slen
blocksize = struct.unpack('<H',s.read(slen))[0]
else:
v = s.read(slen)
chunk = inf.read(blocksize-1-xlen-19)
inf.read(9)
bs[-1].append(inf.tell())
return bs
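# Illustrative usage (hypothetical file name and offsets): each entry pairs the
# byte offset where a BGZF block starts with the offset just past its end.
#   bounds = get_block_bounds('reads.bam')   # e.g. [[0, 18976], [18976, 37011], ...]
#   first_start, first_end = bounds[0]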
def is_bgzf(filename):
"""Pre: filename to test if it is a bgzf format
Post: True or False
:param filename:
:type filename: string
:return: if its a bgzf
:rtype: bool
"""
with open(filename,'rb') as inf:
bytes1 = inf.read(12)
if len(bytes1) != 12:
sys.stderr.write("File length ERROR\n")
return False
try:
gzip_id1,gzip_id2,compression_method,flag,mtime,xfl,osval,xlen=struct.unpack('<BBBBIBBH',bytes1)
except:
sys.stderr.write("Unpack ERROR\n")
return False
if gzip_id1 != 31:
sys.stderr.write("ID1 ERROR\n")
return False
if gzip_id2 != 139:
sys.stderr.write("ID2 ERROR\n")
return False
if compression_method != 8:
sys.stderr.write("Compression Method ERROR\n")
return False
if flag != 4:
sys.stderr.write("flg ERROR\n")
return False
if xlen < 6:
sys.stderr.write("no extra fields ERROR\n")
# ready to look in extra field
bytes2 = inf.read(xlen) # all the extra field stuff
if len(bytes2) != xlen:
sys.stderr.write("file length ERROR\n")
return False
s = StringIO.StringIO(bytes2)
has_id = False
obsslen = 0
while True:
v1 = s.read(4)
if len(v1) == 0: break
if len(v1) < 4:
sys.stderr.write("lack header values ERROR\n")
return False
s1,s2,slen = struct.unpack('<BBH',v1)
if s1 == 66 and s2 == 67:
has_id = True
obsslen = slen
v = s.read(slen)
if len(v) != slen:
sys.stderr.write("extra field read ERROR\n")
return False
if not has_id or not obsslen == 2:
sys.stderr.write("no proper extra header ERROR\n")
return False
return True
class reader:
""" Methods adapted from biopython's bgzf.py
(optional) blockStart is the byte start location of a block
  (optional) innerStart says how far into a decompressed block to start
:param handle:
:param blockStart: start from here (optional)
:param innerStart: start from here (optional)
:type handle: stream
:type blockStart: int
:type innerStart: int
"""
def __init__(self,handle,blockStart=None,innerStart=None):
self.fh = handle
self._pointer = 0
self._block_start = 0
if blockStart:
self.fh.seek(blockStart)
self._pointer = blockStart
#holds block_size and data
self._buffer = self._load_block()
self._buffer_pos = 0
if innerStart: self._buffer_pos = innerStart
def get_block_start(self):
return self._block_start
def get_inner_start(self):
return self._buffer_pos
def seek(self,blockStart,innerStart):
self.fh.seek(blockStart)
self._pointer = blockStart
self._buffer_pos = 0
self._buffer = self._load_block()
self._buffer_pos = innerStart
def read(self,size):
"""read size bytes and return them"""
done = 0 #number of bytes that have been read so far
v = ''
while True:
if size-done < len(self._buffer['data']) - self._buffer_pos:
v += self._buffer['data'][self._buffer_pos:self._buffer_pos+(size-done)]
self._buffer_pos += (size-done)
#self.pointer += size
return v
else: # we need more buffer
vpart = self._buffer['data'][self._buffer_pos:]
self._buffer = self._load_block()
v += vpart
self._buffer_pos = 0
if len(self._buffer['data'])==0: return v
done += len(vpart)
def _load_block(self):
#pointer_start = self.fh.tell()
if not self.fh: return {'block_size':0,'data':''}
#self._block_start = self.fh.tell()
self._block_start = self._pointer
magic = self.fh.read(4)
self._pointer += 4
if len(magic) < 4:
#print 'end?'
#print len(self.fh.read())
return {'block_size':0,'data':''}
gzip_mod_time, gzip_extra_flags, gzip_os,extra_len = struct.unpack("<LBBH",self.fh.read(8))
self._pointer += 8
pos = 0
block_size = None
#get block_size
while pos < extra_len:
subfield_id = self.fh.read(2)
self._pointer += 2
subfield_len = struct.unpack("<H",self.fh.read(2))[0]
self._pointer += 2
subfield_data = self.fh.read(subfield_len)
self._pointer += subfield_len
pos += subfield_len+4
if subfield_id == 'BC':
block_size = struct.unpack("<H",subfield_data)[0]+1
#print 'blocksize :'+str(block_size)
#block_size is determined
deflate_size = block_size - 1 - extra_len - 19
#deflate_size = block_size - extra_len - 19
d = zlib.decompressobj(-15)
data = d.decompress(self.fh.read(deflate_size))+d.flush()
self._pointer += deflate_size
expected_crc = self.fh.read(4)
self._pointer += 4
expected_size = struct.unpack("<I",self.fh.read(4))[0]
self._pointer += 4
#print len(data)
#print expected_size
if expected_size != len(data):
sys.stderr.write("ERROR unexpected size\n")
sys.exit()
crc = zlib.crc32(data)
if crc < 0: crc = struct.pack("<i",crc)
else: crc = struct.pack("<I",crc)
if crc != expected_crc:
sys.stderr.write("ERROR crc fail\n")
sys.exit()
#print self._pointer-self._block_start
#print 'bsize '+str(block_size)
return {'block_size':block_size, 'data':data}
class writer:
""" Give it the handle of the stream to write to"""
def __init__(self,handle):
#self.path = filename
self.fh = handle
self.buffer_size = 64000
self.buffer = bytearray()
def __del__(self):
self.close()
def write(self,bytes):
self.buffer+=bytes
if len(self.buffer) < self.buffer_size:
return True
while len(self.buffer) >= self.buffer_size:
dobytes = self.buffer[0:self.buffer_size]
self.buffer = self.buffer[self.buffer_size:]
self._do_block(dobytes)
return
def close(self):
if len(self.buffer) == 0: return True
self._do_block(self.buffer)
self.buffer = []
return True
def _do_block(self,bytes):
# now we can output this
isize = len(bytes)
s = StringIO.StringIO(bytes)
d = zlib.compressobj(9,zlib.DEFLATED,-zlib.MAX_WBITS)
data = d.compress(str(bytes))+d.flush()
datasize = len(data)
output = bytearray()
output += struct.pack('<B',31) #IDentifier1
output += struct.pack('<B',139) #IDentifier2
output += struct.pack('<B',8) #Compression Method
output += struct.pack('<B',4) #FLaGs
output += struct.pack('<I',int(time.time())) #Modification TIME
output += struct.pack('<B',0) #eXtra FLags
output += struct.pack('<B',0x03) #Operating System = Unix
output += struct.pack('<H',6) #eXtra LENgth
# Subfields
output += struct.pack('<B',66) #Subfield Identifier 1
output += struct.pack('<B',67) # Subfield Identifier 2
output += struct.pack('<H',2) #Subfield Length
outsize = datasize+19+6
output += struct.pack('<H',outsize) #Total block size minus one
output += data
crc = zlib.crc32(str(bytes))
if crc < 0: output += struct.pack("<i",crc)
else: output+= struct.pack("<I",crc)
output += struct.pack("<I",isize) #isize
self.fh.write(str(output))
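# Minimal round-trip sketch (illustrative, not part of the original module;
# Python 2, like the rest of this file): compress a payload with writer and
# read it back with reader through a temporary file.
if __name__ == "__main__":
  import os, tempfile
  payload = 'ACGT' * 1000
  tmp = tempfile.NamedTemporaryFile(delete=False)
  w = writer(tmp)
  w.write(payload)
  w.close()        # flushes the remaining buffer as a final BGZF block
  tmp.close()
  inf = open(tmp.name, 'rb')
  r = reader(inf)
  assert r.read(len(payload)) == payload
  inf.close()
  os.unlink(tmp.name)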
|
jason-weirather/py-seq-tools
|
seqtools/format/bgzf.py
|
Python
|
apache-2.0
| 8,243
|
[
"Biopython"
] |
dca01e1424f4078e8dc84cbb000f549247086590dc610295dfd031f833d1be84
|
#-*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""The :mod:`random_layer` module
implements Random Layer transformers.
Random layers are arrays of hidden unit activations that are
random functions of input activation values (dot products for simple
activation functions, distances from prototypes for radial basis
functions).
They are used in the implementation of Extreme Learning Machines (ELMs),
but can be used as a general input mapping.
"""
from abc import ABCMeta, abstractmethod
from math import sqrt
import numpy as np
import scipy.sparse as sp
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_random_state, check_array
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = ['RandomLayer',
'MLPRandomLayer',
'RBFRandomLayer',
'GRBFRandomLayer',
]
class BaseRandomLayer(BaseEstimator, TransformerMixin):
"""Abstract Base Class for random layers"""
__metaclass__ = ABCMeta
_internal_activation_funcs = dict()
@classmethod
def activation_func_names(cls):
"""Get list of internal activation function names"""
return cls._internal_activation_funcs.keys()
# take n_hidden and random_state, init components_ and
# input_activations_
def __init__(self, n_hidden=20, random_state=0, activation_func=None,
activation_args=None):
self.n_hidden = n_hidden
self.random_state = random_state
self.activation_func = activation_func
self.activation_args = activation_args
self.components_ = dict()
self.input_activations_ = None
# keyword args for internally defined funcs
self._extra_args = dict()
@abstractmethod
def _generate_components(self, X):
"""Generate components of hidden layer given X"""
@abstractmethod
def _compute_input_activations(self, X):
"""Compute input activations given X"""
# compute input activations and pass them
# through the hidden layer transfer functions
# to compute the transform
def _compute_hidden_activations(self, X):
"""Compute hidden activations given X"""
self._compute_input_activations(X)
acts = self.input_activations_
if (callable(self.activation_func)):
args_dict = self.activation_args if (self.activation_args) else {}
X_new = self.activation_func(acts, **args_dict)
else:
func_name = self.activation_func
func = self._internal_activation_funcs[func_name]
X_new = func(acts, **self._extra_args)
return X_new
# perform fit by generating random components based
# on the input array
def fit(self, X, y=None):
"""Generate a random hidden layer.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training set: only the shape is used to generate random component
values for hidden units
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X)
self._generate_components(X)
return self
# perform transformation by calling compute_hidden_activations
# (which will normally call compute_input_activations first)
def transform(self, X, y=None):
"""Generate the random hidden layer's activations given X as input.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data to transform
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array of shape [n_samples, n_components]
"""
X = check_array(X)
if (self.components_ is None):
raise ValueError('No components initialized')
return self._compute_hidden_activations(X)
class RandomLayer(BaseRandomLayer):
"""RandomLayer is a transformer that creates a feature mapping of the
inputs that corresponds to a layer of hidden units with randomly
generated components.
The transformed values are a specified function of input activations
that are a weighted combination of dot product (multilayer perceptron)
and distance (rbf) activations:
input_activation = alpha * mlp_activation + (1-alpha) * rbf_activation
mlp_activation(x) = dot(x, weights) + bias
rbf_activation(x) = rbf_width * ||x - center||/radius
alpha and rbf_width are specified by the user
weights and biases are taken from normal distribution of
mean 0 and sd of 1
centers are taken uniformly from the bounding hyperrectangle
of the inputs, and radii are max(||x-c||)/sqrt(n_centers*2)
The input activation is transformed by a transfer function that defaults
to numpy.tanh if not specified, but can be any callable that returns an
array of the same shape as its argument (the input activation array, of
shape [n_samples, n_hidden]). Functions provided are 'sine', 'tanh',
'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian',
'multiquadric', or 'inv_multiquadric'.
Parameters
----------
`n_hidden` : int, optional (default=20)
Number of units to generate
`alpha` : float, optional (default=0.5)
Mixing coefficient for distance and dot product input activations:
activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation
`rbf_width` : float, optional (default=1.0)
multiplier on rbf_activation
`user_components`: dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
randomly generated. Valid key/value pairs are as follows:
'radii' : array-like of shape [n_hidden]
'centers': array-like of shape [n_hidden, n_features]
'biases' : array-like of shape [n_hidden]
'weights': array-like of shape [n_features, n_hidden]
`activation_func` : {callable, string} optional (default='tanh')
Function used to transform input activation
It must be one of 'tanh', 'sine', 'tribas', 'inv_tribas',
'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric',
'inv_multiquadric' or a callable. If None is given, 'tanh'
will be used.
If a callable is given, it will be used to compute the activations.
`activation_args` : dictionary, optional (default=None)
Supplies keyword arguments for a callable activation_func
`random_state` : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
hidden unit weights at fit time.
Attributes
----------
`input_activations_` : numpy array of shape [n_samples, n_hidden]
Array containing dot(x, hidden_weights) + bias for all samples
`components_` : dictionary containing two keys:
`bias_weights_` : numpy array of shape [n_hidden]
`hidden_weights_` : numpy array of shape [n_features, n_hidden]
See Also
--------
"""
# triangular activation function
_tribas = (lambda x: np.clip(1.0 - np.fabs(x), 0.0, 1.0))
# inverse triangular activation function
_inv_tribas = (lambda x: np.clip(np.fabs(x), 0.0, 1.0))
# sigmoid activation function
_sigmoid = (lambda x: 1.0/(1.0 + np.exp(-x)))
# hard limit activation function
_hardlim = (lambda x: np.array(x > 0.0, dtype=float))
_softlim = (lambda x: np.clip(x, 0.0, 1.0))
# gaussian RBF
_gaussian = (lambda x: np.exp(-pow(x, 2.0)))
# multiquadric RBF
_multiquadric = (lambda x:
np.sqrt(1.0 + pow(x, 2.0)))
# inverse multiquadric RBF
_inv_multiquadric = (lambda x:
1.0/(np.sqrt(1.0 + pow(x, 2.0))))
# internal activation function table
_internal_activation_funcs = {'sine': np.sin,
'tanh': np.tanh,
'tribas': _tribas,
'inv_tribas': _inv_tribas,
'sigmoid': _sigmoid,
'softlim': _softlim,
'hardlim': _hardlim,
'gaussian': _gaussian,
'multiquadric': _multiquadric,
'inv_multiquadric': _inv_multiquadric,
}
def __init__(self, n_hidden=20, alpha=0.5, random_state=None,
activation_func='tanh', activation_args=None,
user_components=None, rbf_width=1.0):
super(RandomLayer, self).__init__(n_hidden=n_hidden,
random_state=random_state,
activation_func=activation_func,
activation_args=activation_args)
if (isinstance(self.activation_func, str)):
func_names = self._internal_activation_funcs.keys()
if (self.activation_func not in func_names):
msg = "unknown activation function '%s'" % self.activation_func
raise ValueError(msg)
self.alpha = alpha
self.rbf_width = rbf_width
self.user_components = user_components
self._use_mlp_input = (self.alpha != 0.0)
self._use_rbf_input = (self.alpha != 1.0)
def _get_user_components(self, key):
"""Look for given user component"""
try:
return self.user_components[key]
except (TypeError, KeyError):
return None
def _compute_radii(self):
"""Generate RBF radii"""
# use supplied radii if present
radii = self._get_user_components('radii')
# compute radii
if (radii is None):
centers = self.components_['centers']
n_centers = centers.shape[0]
max_dist = np.max(pairwise_distances(centers))
radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)
self.components_['radii'] = radii
def _compute_centers(self, X, sparse, rs):
"""Generate RBF centers"""
# use supplied centers if present
centers = self._get_user_components('centers')
# use points taken uniformly from the bounding
# hyperrectangle
if (centers is None):
n_features = X.shape[1]
if (sparse):
fxr = xrange(n_features)
cols = [X.getcol(i) for i in fxr]
min_dtype = X.dtype.type(1.0e10)
sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))
min_Xs = np.array(map(sp_min, cols))
max_dtype = X.dtype.type(-1.0e10)
sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))
max_Xs = np.array(map(sp_max, cols))
else:
min_Xs = X.min(axis=0)
max_Xs = X.max(axis=0)
spans = max_Xs - min_Xs
ctrs_size = (self.n_hidden, n_features)
centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)
self.components_['centers'] = centers
def _compute_biases(self, rs):
"""Generate MLP biases"""
# use supplied biases if present
biases = self._get_user_components('biases')
if (biases is None):
b_size = self.n_hidden
biases = rs.normal(size=b_size)
self.components_['biases'] = biases
def _compute_weights(self, X, rs):
"""Generate MLP weights"""
# use supplied weights if present
weights = self._get_user_components('weights')
if (weights is None):
n_features = X.shape[1]
hw_size = (n_features, self.n_hidden)
weights = rs.normal(size=hw_size)
self.components_['weights'] = weights
def _generate_components(self, X):
"""Generate components of hidden layer given X"""
rs = check_random_state(self.random_state)
if (self._use_mlp_input):
self._compute_biases(rs)
self._compute_weights(X, rs)
if (self._use_rbf_input):
self._compute_centers(X, sp.issparse(X), rs)
self._compute_radii()
def _compute_input_activations(self, X):
"""Compute input activations given X"""
n_samples = X.shape[0]
mlp_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_mlp_input):
b = self.components_['biases']
w = self.components_['weights']
mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)
rbf_acts = np.zeros((n_samples, self.n_hidden))
if (self._use_rbf_input):
radii = self.components_['radii']
centers = self.components_['centers']
scale = self.rbf_width * (1.0 - self.alpha)
rbf_acts = scale * cdist(X, centers)/radii
self.input_activations_ = mlp_acts + rbf_acts
class MLPRandomLayer(RandomLayer):
"""Wrapper for RandomLayer with alpha (mixing coefficient) set
to 1.0 for MLP activations only"""
def __init__(self, n_hidden=20, random_state=None,
activation_func='tanh', activation_args=None,
weights=None, biases=None):
user_components = {'weights': weights, 'biases': biases}
super(MLPRandomLayer, self).__init__(n_hidden=n_hidden,
random_state=random_state,
activation_func=activation_func,
activation_args=activation_args,
user_components=user_components,
alpha=1.0)
class RBFRandomLayer(RandomLayer):
"""Wrapper for RandomLayer with alpha (mixing coefficient) set
to 0.0 for RBF activations only"""
def __init__(self, n_hidden=20, random_state=None,
activation_func='gaussian', activation_args=None,
centers=None, radii=None, rbf_width=1.0):
user_components = {'centers': centers, 'radii': radii}
super(RBFRandomLayer, self).__init__(n_hidden=n_hidden,
random_state=random_state,
activation_func=activation_func,
activation_args=activation_args,
user_components=user_components,
rbf_width=rbf_width,
alpha=0.0)
class GRBFRandomLayer(RBFRandomLayer):
"""Random Generalized RBF Hidden Layer transformer
Creates a layer of radial basis function units where:
f(a), s.t. a = ||x-c||/r
with c the unit center
and f() is exp(-gamma * a^tau) where tau and r are computed
based on [1]
Parameters
----------
`n_hidden` : int, optional (default=20)
Number of units to generate, ignored if centers are provided
`grbf_lambda` : float, optional (default=0.05)
GRBF shape parameter
`gamma` : {int, float} optional (default=1.0)
Width multiplier for GRBF distance argument
`centers` : array of shape (n_hidden, n_features), optional (default=None)
If provided, overrides internal computation of the centers
`radii` : array of shape (n_hidden), optional (default=None)
If provided, overrides internal computation of the radii
`use_exemplars` : bool, optional (default=False)
If True, uses random examples from the input to determine the RBF
centers, ignored if centers are provided
`random_state` : int or RandomState instance, optional (default=None)
Control the pseudo random number generator used to generate the
centers at fit time, ignored if centers are provided
Attributes
----------
`components_` : dictionary containing two keys:
`radii_` : numpy array of shape [n_hidden]
`centers_` : numpy array of shape [n_hidden, n_features]
`input_activations_` : numpy array of shape [n_samples, n_hidden]
Array containing ||x-c||/r for all samples
See Also
--------
ELMRegressor, ELMClassifier, SimpleELMRegressor, SimpleELMClassifier,
SimpleRandomLayer
References
----------
.. [1] Fernandez-Navarro, et al, "MELM-GRBF: a modified version of the
extreme learning machine for generalized radial basis function
neural networks", Neurocomputing 74 (2011), 2502-2510
"""
# def _grbf(acts, taus):
# """GRBF activation function"""
# return np.exp(np.exp(-pow(acts, taus)))
_grbf = (lambda acts, taus: np.exp(np.exp(-pow(acts, taus))))
_internal_activation_funcs = {'grbf': _grbf}
def __init__(self, n_hidden=20, grbf_lambda=0.001,
centers=None, radii=None, random_state=None):
super(GRBFRandomLayer, self).__init__(n_hidden=n_hidden,
activation_func='grbf',
centers=centers, radii=radii,
random_state=random_state)
self.grbf_lambda = grbf_lambda
self.dN_vals = None
self.dF_vals = None
self.tau_vals = None
# get centers from superclass, then calculate tau_vals
# according to ref [1]
def _compute_centers(self, X, sparse, rs):
"""Generate centers, then compute tau, dF and dN vals"""
super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)
centers = self.components_['centers']
sorted_distances = np.sort(squareform(pdist(centers)))
self.dF_vals = sorted_distances[:, -1]
self.dN_vals = sorted_distances[:, 1]/100.0
#self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)
tauNum = np.log(np.log(self.grbf_lambda) /
np.log(1.0 - self.grbf_lambda))
tauDenom = np.log(self.dF_vals/self.dN_vals)
self.tau_vals = tauNum/tauDenom
self._extra_args['taus'] = self.tau_vals
# get radii according to ref [1]
def _compute_radii(self):
"""Generate radii"""
denom = pow(-np.log(self.grbf_lambda), 1.0/self.tau_vals)
self.components_['radii'] = self.dF_vals/denom
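# Minimal usage sketch (illustrative, not part of the original module): fit an
# MLP-style random layer on a small, arbitrary matrix and check the shape of
# the hidden activations it produces.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(size=(5, 3))
    layer = MLPRandomLayer(n_hidden=4, random_state=0)  # tanh of dot products
    H = layer.fit_transform(X_demo)                     # shape (5, 4)
    assert H.shape == (5, 4)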
|
holandajunior/ExtremeLearningMachine
|
Python-ELM/src/random_layer.py
|
Python
|
mit
| 18,813
|
[
"Gaussian"
] |
8ab2cc84500d1f94cdc1b156e880fc52e5446d6275a20da99c17b4e8dc257b2b
|
"""
Support for generating the options for a SelectToolParameter dynamically (based
on the values of other parameters or other aspects of the current state)
"""
import operator, sys, os, logging
import basic, validation
from galaxy.util import string_as_bool
log = logging.getLogger(__name__)
class Filter( object ):
"""
A filter takes the current options list and modifies it.
"""
@classmethod
def from_element( cls, d_option, elem ):
"""Loads the proper filter by the type attribute of elem"""
type = elem.get( 'type', None )
assert type is not None, "Required 'type' attribute missing from filter"
return filter_types[type.strip()]( d_option, elem )
def __init__( self, d_option, elem ):
self.dynamic_option = d_option
self.elem = elem
def get_dependency_name( self ):
"""Returns the name of any depedencies, otherwise None"""
return None
def filter_options( self, options, trans, other_values ):
"""Returns a list of options after the filter is applied"""
raise TypeError( "Abstract Method" )
class StaticValueFilter( Filter ):
"""
Filters a list of options on a column by a static value.
Type: static_value
Required Attributes:
value: static value to compare to
column: column in options to compare with
Optional Attributes:
keep: Keep columns matching value (True)
Discard columns matching value (False)
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.value = elem.get( "value", None )
assert self.value is not None, "Required 'value' attribute missing from filter"
self.column = elem.get( "column", None )
        assert self.column is not None, "Required 'column' attribute missing from filter"
self.column = int ( self.column )
self.keep = string_as_bool( elem.get( "keep", 'True' ) )
def filter_options( self, options, trans, other_values ):
rval = []
for fields in options:
if ( self.keep and fields[self.column] == self.value ) or ( not self.keep and fields[self.column] != self.value ):
rval.append( fields )
return rval
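# Illustrative (hypothetical) tool XML for this filter type -- option rows are
# split on the configured separator and compared by 0-based column index; the
# file name and values below are made up:
#   <options from_file="my_tool_data.loc">
#     <column name="name"  index="0"/>
#     <column name="value" index="1"/>
#     <column name="dbkey" index="2"/>
#     <filter type="static_value" column="2" value="hg18" keep="true"/>
#   </options>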
class DataMetaFilter( Filter ):
"""
Filters a list of options on a column by a dataset metadata value.
Type: data_meta
When no 'from_' source has been specified in the <options> tag, this will populate the options list with (meta_value, meta_value, False).
Otherwise, options which do not match the metadata value in the column are discarded.
Required Attributes:
ref: Name of input dataset
key: Metadata key to use for comparison
column: column in options to compare with (not required when not associated with input options)
Optional Attributes:
multiple: Option values are multiple, split column by separator (True)
separator: When multiple split by this (,)
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.ref_name = elem.get( "ref", None )
assert self.ref_name is not None, "Required 'ref' attribute missing from filter"
d_option.has_dataset_dependencies = True
self.key = elem.get( "key", None )
assert self.key is not None, "Required 'key' attribute missing from filter"
self.column = elem.get( "column", None )
if self.column is None:
assert self.dynamic_option.file_fields is None and self.dynamic_option.dataset_ref_name is None, "Required 'column' attribute missing from filter, when loading from file"
else:
self.column = int ( self.column )
self.multiple = string_as_bool( elem.get( "multiple", "False" ) )
self.separator = elem.get( "separator", "," )
def get_dependency_name( self ):
return self.ref_name
def filter_options( self, options, trans, other_values ):
def compare_meta_value( file_value, dataset_value ):
if isinstance( dataset_value, list ):
if self.multiple:
file_value = file_value.split( self.separator )
for value in dataset_value:
if value not in file_value:
return False
return True
return file_value in dataset_value
if self.multiple:
return dataset_value in file_value.split( self.separator )
return file_value == dataset_value
assert self.ref_name in other_values or ( trans is not None and trans.workflow_building_mode), "Required dependency '%s' not found in incoming values" % self.ref_name
ref = other_values.get( self.ref_name, None )
if not isinstance( ref, self.dynamic_option.tool_param.tool.app.model.HistoryDatasetAssociation ):
return [] #not a valid dataset
meta_value = ref.metadata.get( self.key, None )
if meta_value is None: #assert meta_value is not None, "Required metadata value '%s' not found in referenced dataset" % self.key
return [ ( disp_name, basic.UnvalidatedValue( optval ), selected ) for disp_name, optval, selected in options ]
if self.column is not None:
rval = []
for fields in options:
if compare_meta_value( fields[self.column], meta_value ):
rval.append( fields )
return rval
else:
if not isinstance( meta_value, list ):
meta_value = [meta_value]
for value in meta_value:
options.append( ( value, value, False ) )
return options
class ParamValueFilter( Filter ):
"""
Filters a list of options on a column by the value of another input.
Type: param_value
Required Attributes:
ref: Name of input value
column: column in options to compare with
Optional Attributes:
keep: Keep columns matching value (True)
Discard columns matching value (False)
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.ref_name = elem.get( "ref", None )
assert self.ref_name is not None, "Required 'ref' attribute missing from filter"
self.column = elem.get( "column", None )
assert self.column is not None, "Required 'column' attribute missing from filter"
self.column = int ( self.column )
self.keep = string_as_bool( elem.get( "keep", 'True' ) )
def get_dependency_name( self ):
return self.ref_name
def filter_options( self, options, trans, other_values ):
if trans is not None and trans.workflow_building_mode: return []
assert self.ref_name in other_values, "Required dependency '%s' not found in incoming values" % self.ref_name
ref = str( other_values.get( self.ref_name, None ) )
rval = []
for fields in options:
if ( self.keep and fields[self.column] == ref ) or ( not self.keep and fields[self.column] != ref ):
rval.append( fields )
return rval
class UniqueValueFilter( Filter ):
"""
Filters a list of options to be unique by a column value.
Type: unique_value
Required Attributes:
column: column in options to compare with
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.column = elem.get( "column", None )
assert self.column is not None, "Required 'column' attribute missing from filter"
self.column = int ( self.column )
def get_dependency_name( self ):
return self.dynamic_option.dataset_ref_name
def filter_options( self, options, trans, other_values ):
rval = []
skip_list = []
for fields in options:
if fields[self.column] not in skip_list:
rval.append( fields )
skip_list.append( fields[self.column] )
return rval
class MultipleSplitterFilter( Filter ):
"""
Turns a single line of options into multiple lines, by splitting a column and creating a line for each item.
Type: multiple_splitter
Required Attributes:
column: column in options to compare with
Optional Attributes:
separator: Split column by this (,)
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.separator = elem.get( "separator", "," )
self.columns = elem.get( "column", None )
        assert self.columns is not None, "Required 'column' attribute missing from filter"
self.columns = [ int ( column ) for column in self.columns.split( "," ) ]
def filter_options( self, options, trans, other_values ):
rval = []
for fields in options:
for column in self.columns:
for field in fields[column].split( self.separator ):
rval.append( fields[0:column] + [field] + fields[column:] )
return rval
class AdditionalValueFilter( Filter ):
"""
Adds a single static value to an options list.
Type: add_value
Required Attributes:
value: value to appear in select list
Optional Attributes:
name: Display name to appear in select list (value)
index: Index of option list to add value (APPEND)
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.value = elem.get( "value", None )
assert self.value is not None, "Required 'value' attribute missing from filter"
self.name = elem.get( "name", None )
if self.name is None:
self.name = self.value
self.index = elem.get( "index", None )
if self.index is not None:
self.index = int( self.index )
def filter_options( self, options, trans, other_values ):
rval = list( options )
add_value = []
for i in range( self.dynamic_option.largest_index + 1 ):
add_value.append( "" )
add_value[self.dynamic_option.columns['value']] = self.value
add_value[self.dynamic_option.columns['name']] = self.name
if self.index is not None:
rval.insert( self.index, add_value )
else:
rval.append( add_value )
return rval
class RemoveValueFilter( Filter ):
"""
Removes a value from an options list.
Type: remove_value
Required Attributes:
value: value to remove from select list
or
ref: param to refer to
or
meta_ref: dataset to refer to
key: metadata key to compare to
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.value = elem.get( "value", None )
self.ref_name = elem.get( "ref", None )
self.meta_ref = elem.get( "meta_ref", None )
self.metadata_key = elem.get( "key", None )
assert self.value is not None or ( ( self.ref_name is not None or self.meta_ref is not None )and self.metadata_key is not None ), ValueError( "Required 'value' or 'ref' and 'key' attributes missing from filter" )
self.multiple = string_as_bool( elem.get( "multiple", "False" ) )
self.separator = elem.get( "separator", "," )
def filter_options( self, options, trans, other_values ):
if trans is not None and trans.workflow_building_mode: return options
assert self.value is not None or ( self.ref_name is not None and self.ref_name in other_values ) or (self.meta_ref is not None and self.meta_ref in other_values ) or ( trans is not None and trans.workflow_building_mode), Exception( "Required dependency '%s' or '%s' not found in incoming values" % ( self.ref_name, self.meta_ref ) )
def compare_value( option_value, filter_value ):
if isinstance( filter_value, list ):
if self.multiple:
option_value = option_value.split( self.separator )
for value in filter_value:
                    if value not in option_value:  # every filter value must appear among the option's values
return False
return True
return option_value in filter_value
if self.multiple:
return filter_value in option_value.split( self.separator )
return option_value == filter_value
value = self.value
if value is None:
if self.ref_name is not None:
value = other_values.get( self.ref_name )
else:
data_ref = other_values.get( self.meta_ref )
if not isinstance( data_ref, self.dynamic_option.tool_param.tool.app.model.HistoryDatasetAssociation ):
return options #cannot modify options
value = data_ref.metadata.get( self.metadata_key, None )
return [ ( disp_name, optval, selected ) for disp_name, optval, selected in options if not compare_value( optval, value ) ]
class SortByColumnFilter( Filter ):
"""
Sorts an options list by a column
Type: sort_by
Required Attributes:
column: column to sort by
"""
def __init__( self, d_option, elem ):
Filter.__init__( self, d_option, elem )
self.column = elem.get( "column", None )
assert self.column is not None, "Required 'column' attribute missing from filter"
self.column = int( self.column )
def filter_options( self, options, trans, other_values ):
rval = []
for i, fields in enumerate( options ):
for j in range( 0, len( rval ) ):
if fields[self.column] < rval[j][self.column]:
rval.insert( j, fields )
break
else:
rval.append( fields )
return rval
filter_types = dict( data_meta = DataMetaFilter,
param_value = ParamValueFilter,
static_value = StaticValueFilter,
unique_value = UniqueValueFilter,
multiple_splitter = MultipleSplitterFilter,
add_value = AdditionalValueFilter,
remove_value = RemoveValueFilter,
sort_by = SortByColumnFilter )
class DynamicOptions( object ):
"""Handles dynamically generated SelectToolParameter options"""
def __init__( self, elem, tool_param ):
def load_from_parameter( from_parameter, transform_lines = None ):
obj = self.tool_param
for field in from_parameter.split( '.' ):
obj = getattr( obj, field )
if transform_lines:
obj = eval( transform_lines )
return self.parse_file_fields( obj )
self.tool_param = tool_param
self.columns = {}
self.filters = []
self.file_fields = None
self.largest_index = 0
self.dataset_ref_name = None
# True if the options generation depends on one or more other parameters
# that are dataset inputs
self.has_dataset_dependencies = False
self.validators = []
self.converter_safe = True
# Parse the <options> tag
self.separator = elem.get( 'separator', '\t' )
self.line_startswith = elem.get( 'startswith', None )
data_file = elem.get( 'from_file', None )
dataset_file = elem.get( 'from_dataset', None )
from_parameter = elem.get( 'from_parameter', None )
if data_file is not None or dataset_file is not None or from_parameter is not None:
for column_elem in elem.findall( 'column' ):
name = column_elem.get( 'name', None )
assert name is not None, "Required 'name' attribute missing from column def"
index = column_elem.get( 'index', None )
assert index is not None, "Required 'index' attribute missing from column def"
index = int( index )
self.columns[name] = index
if index > self.largest_index:
self.largest_index = index
assert 'value' in self.columns, "Required 'value' column missing from column def"
if 'name' not in self.columns:
self.columns['name'] = self.columns['value']
if data_file is not None:
data_file = data_file.strip()
if not os.path.isabs( data_file ):
data_file = os.path.join( self.tool_param.tool.app.config.tool_data_path, data_file )
self.file_fields = self.parse_file_fields( open( data_file ) )
elif dataset_file is not None:
self.dataset_ref_name = dataset_file
self.has_dataset_dependencies = True
self.converter_safe = False
elif from_parameter is not None:
transform_lines = elem.get( 'transform_lines', None )
self.file_fields = list( load_from_parameter( from_parameter, transform_lines ) )
# Load filters
for filter_elem in elem.findall( 'filter' ):
self.filters.append( Filter.from_element( self, filter_elem ) )
# Load Validators
for validator in elem.findall( 'validator' ):
self.validators.append( validation.Validator.from_element( self.tool_param, validator ) )
def parse_file_fields( self, reader ):
rval = []
for line in reader:
if line.startswith( '#' ) or ( self.line_startswith and not line.startswith( self.line_startswith ) ):
continue
line = line.rstrip( "\n\r" )
if line:
fields = line.split( self.separator )
if self.largest_index < len( fields ):
rval.append( fields )
return rval
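    # For example (illustrative, values made up): with the default tab
    # separator and columns {'name': 0, 'value': 1}, the line
    #   "Human (hg18)\thg18\t/depot/data/hg18"
    # yields the option fields ['Human (hg18)', 'hg18', '/depot/data/hg18'].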
def get_dependency_names( self ):
"""
Return the names of parameters these options depend on -- both data
and other param types.
"""
rval = []
if self.dataset_ref_name:
rval.append( self.dataset_ref_name )
for filter in self.filters:
depend = filter.get_dependency_name()
if depend:
rval.append( depend )
return rval
def get_fields( self, trans, other_values ):
if self.dataset_ref_name:
dataset = other_values.get( self.dataset_ref_name, None )
assert dataset is not None, "Required dataset '%s' missing from input" % self.dataset_ref_name
if not dataset: return [] #no valid dataset in history
options = self.parse_file_fields( open( dataset.file_name ) )
else:
options = list( self.file_fields )
for filter in self.filters:
options = filter.filter_options( options, trans, other_values )
return options
def get_options( self, trans, other_values ):
rval = []
if self.file_fields is not None or self.dataset_ref_name is not None:
options = self.get_fields( trans, other_values )
for fields in options:
rval.append( ( fields[self.columns['name']], fields[self.columns['value']], False ) )
else:
for filter in self.filters:
rval = filter.filter_options( rval, trans, other_values )
return rval
|
volpino/Yeps-EURAC
|
lib/galaxy/tools/parameters/dynamic_options.py
|
Python
|
mit
| 19,570
|
[
"Galaxy"
] |
7888874ff11e5b8840f11778224dfe608aaa1b6dedb0282d51d066ebaf288736
|
# Copyright (c) 2015, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from yambopy.plot import *
import os
class YamboBSEAbsorptionSpectra(YamboSaveDB):
"""
Create a file with information about the excitons from Yambo files
"""
def __init__(self,job_string,path='.'):
"""
Parameters:
job_string - the job_string used for yambo. yambo -J <job_string>
path - the folder where the yambo run was made
"""
#look for the save folder
self.save=path+'/SAVE'
if not os.path.isdir(self.save):
raise ValueError('SAVE folder not found in %s'%self.save)
YamboSaveDB.__init__(self,save=self.save)
self.job_string = job_string
self.data = {"excitons":[],
"lattice": self.lat,
"atypes": self.atomic_numbers,
"atoms": self.atomic_positions}
self.atoms = None
self.excitons = None
#use YamboOut to read the absorption spectra
self.path = path
#try to find o-* files in path, if not use path/job_string
paths = [path, "%s/%s"%(path,job_string)]
for p in paths:
y = YamboOut(p,save_folder=path)
absorptionspectra = y.get_data(('eps','diago'))
#if we read the files then continue
if absorptionspectra != {}:
break
#trap the errors here
if absorptionspectra == {}:
raise ValueError('Could not find the o-*diago*eps files in %s. Make sure you diagonalized the BSE hamiltonian in yambo.'%paths)
#we just use one of them
key = list(absorptionspectra)[0]
for key,value in absorptionspectra[key].items():
self.data[key] = value
def get_excitons(self,min_intensity=0.1,max_energy=4,Degen_Step=0.0):
"""
Obtain the excitons using ypp
Parameters:
min_intensity - Only plot excitons with intensity larger than this value (default: 0.1)
max_energy - Only plot excitons with energy below this value (default: 4 eV)
            Degen_Step - Only plot excitons whose energy is different by more than this value (default: 0.0)
"""
filename = "%s/o-%s.exc_E_sorted"%(self.path,self.job_string)
if not os.path.isfile(filename):
os.system("cd %s; ypp -e s -J %s"%(self.path,self.job_string))
self.excitons = np.loadtxt(filename)
#filter with energy
self.excitons = self.excitons[self.excitons[:,0]<max_energy]
#filter with intensity
self.excitons = self.excitons[self.excitons[:,1]>min_intensity]
#filter with degen
if Degen_Step:
#create a list with differences in energy
new_excitons = []
prev_exc = 0
for exc in self.excitons:
e,i,index = exc
                #if the energy of this exciton is within Degen_Step of the previous one, merge its intensity into the previous entry
if abs(e-prev_exc)<Degen_Step:
new_excitons[-1][1] += i
continue
new_excitons.append([e,i,index])
intensity = 0
prev_exc = e
self.excitons = np.array(new_excitons)
#create dictionary with excitons
excitons = self.data["excitons"]
for e,intensity,i in self.excitons:
exciton = {"energy": e,
"intensity": intensity,
"index": i}
excitons.append(exciton)
return self.excitons
def get_wavefunctions(self, FFTGvecs=30,
Cells=[1,1,1], Hole=[0,0,0],
Direction="123", Format="x",
Degen_Step=0.0100,
MinWeight=1e-8,
repx=range(-1,2), repy=range(-1,2), repz=range(-1,2),
wf=False):
"""
Collect all the wavefunctions of the excitons selected by get_excitons()
Parameters:
FFTGvecs - Number of FFTGvecs. Related to how accurate the representation is
Cells - Number of cells to plot in real space
Hole - Define the hole position in cartesian coordinates
Direction - Choose the directions to plot along
Format - Choose the format to plot in. Can be: x for xcrysden or g for gnuplot (default: 'x' for xcrysden)
Degen_Step - Threshold to merge degenerate states. If the difference in energy between the states is smaller than
this value their wavefunctions will be plotted together
repx - Number of repetitions along the x direction
repy - Number of repetitions along the y direction
repz - Number of repetitions along the z direction
wf - Get the wavefunctions in real space or not (default: False)
"""
if self.excitons is None:
raise ValueError( "Excitons not present. Run YamboBSEAbsorptionSpectra.get_excitons() first" )
self.data["excitons"] = []
#create a ypp file using YamboIn for reading the wavefunction
yppwf = YamboIn('ypp -e w -V all',filename='ypp.in',folder=self.path)
yppwf['Format'] = Format
yppwf['Direction'] = Direction
yppwf['FFTGvecs'] = [FFTGvecs,'Ry']
yppwf['Degen_Step'] = [Degen_Step,'eV']
yppwf['Hole'] = [Hole,'']
yppwf['Cells'] = [Cells,'']
#create a ypp file using YamboIn for reading the excitonic weights
yppew = YamboIn('ypp -e a',filename='ypp.in',folder=self.path)
yppew['MinWeight'] = MinWeight
yppew['Degen_Step'] = Degen_Step
keywords = ["lattice", "atoms", "atypes", "nx", "ny", "nz"]
for exciton in self.excitons:
#get info
e,intensity,i = exciton
if wf:
##############################################################
# Excitonic Wavefunction
##############################################################
#create ypp input for the wavefunction file and run
yppwf["States"] = "%d - %d"%(i,i)
yppwf.write("%s/yppwf_%d.in"%(self.path,i))
filename = "o-%s.exc_%dd_%d%s"%(self.job_string,len(Direction),i,{"g":"","x":".xsf"}[Format] )
print filename
if not os.path.isfile(filename):
os.system("cd %s; ypp -F yppwf_%d.in -J %s"%(self.path,i,self.job_string))
#read the excitonic wavefunction
if Format == 'x':
ewf = YamboExcitonWaveFunctionXSF()
else:
ewf = YamboExcitonWaveFunctionGnuplot()
ewf.read_file("%s/%s"%(self.path,filename))
data = ewf.get_data()
for word in keywords:
if word in data:
self.data[word] = data[word]
#calculate center of mass of atoms
lat = np.array(data["lattice"])
center_atom = np.zeros([3])
for atype,x,y,z in data["atoms"]:
center_atom += np.array([x,y,z])
center_atom /= len(data["atoms"])
center_atom_red = car_red([center_atom],lat)[0]
#shift wavefunctions grid to center of mass
nx = data['nx']
ny = data['ny']
nz = data['nz']
#make center_atom_red commensurate with fft
center_atom_red = center_atom_red * np.array([nx,ny,nz])
center_atom_red_int = [int(x) for x in center_atom_red]
displacement = np.array([nx,ny,nz])/2-center_atom_red_int
dx,dy,dz = displacement
# shift grid
# http://www.xcrysden.org/doc/XSF.html
dg = np.array(data["datagrid"]).reshape([nz,ny,nx])
dg = np.roll(dg,dx,axis=2)
dg = np.roll(dg,dy,axis=1)
dg = np.roll(dg,dz,axis=0)
data["datagrid"] = dg.flatten()
#shift atoms
atoms = []
dx,dy,dz = red_car([displacement/np.array([nx,ny,nz],dtype=float)],lat)[0]
for atype,x,y,z in data["atoms"]:
atoms.append([atype,x+dx,y+dy,z+dz])
self.data["atoms"] = atoms
##############################################################
# Excitonic Amplitudes
##############################################################
#create ypp input for the amplitudes file and run
yppew["States"] = "%d - %d"%(i,i)
yppew.write("%s/yppew_%d.in"%(self.path,i))
filename = "%s/o-%s.exc_weights_at_%d"%(self.path,self.job_string,i)
if not os.path.isfile(filename):
os.system("cd %s; ypp -F yppew_%d.in -J %s"%(self.path,i,self.job_string))
#read the excitonic weigths
ew = YamboExcitonWeight(filename,save=self.save,path=self.path)
qpts, weights = ew.calc_kpts_weights(repx=repx,repy=repy,repz=repz)
############
# Save data
############
exciton = {"energy": e,
"intensity": intensity,
"weights": weights,
"qpts": qpts,
"index": i}
if wf:
exciton["hole"] = Hole
exciton["datagrid"] = np.array(data["datagrid"])
self.data["excitons"].append(exciton)
def write_json(self,filename="absorptionspectra"):
""" Write a jsonfile with the absorption spectra and the wavefunctions of certain excitons
"""
print "writing json file...",
JsonDumper(self.data,"%s.json"%filename)
print "done!"
|
palful/yambopy
|
yambopy/bse/bse_absorption.py
|
Python
|
bsd-3-clause
| 10,079
|
[
"Yambo"
] |
05ad07cfbd62841ee2937f566884c1982b13702148cee9cbf1ee13b48b7e3ece
|
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
XNAT Sessions Report
Check for valid scanning sessions and time windows by first caching all the
XNAT session XML files and then parsing these files for necessary info. Note
that to create the XML file cache you need to run with --update
Example
=======
- When running for the first time run
./xnat_sessions_report.py --update
so that the cache (located at experimentsdir) is created
- Update the cache (stored in experimentsdir) and generate the baseline report
./xnat_sessions_report.py --update --baseline
- Use the existing cache to extract 10 sessions in the followup window
./xnat_sessions_report.py --num_extract 10 --min 180 --max 540
"""
from __future__ import print_function
import os
import sys
import pandas as pd
import sibispy
from sibispy import sibislogger as slog
import xnat_extractor as xe
verbose = None
def get_scan_type_pairs(modality):
"""
Get a dictionary of series description based on modality
:param modality: str (anatomy, diffusion, functional)
:return: dict
"""
scan_type_pairs = dict(scan1=None, scan2=None)
if modality == 'anatomy':
t1_scan_types = ['ncanda-t1spgr-v1', 'ncanda-mprage-v1']
t2_scan_types = ['ncanda-t2fse-v1']
scan_type_pairs.update(scan1=t1_scan_types,
scan2=t2_scan_types)
elif modality == 'diffusion':
print("Has to be updated as check does not include dti30b400 - look in redcap/export_measures")
sys.exit()
pepolar = ['ncanda-dti6b500pepolar-v1']
dwi = ['ncanda-dti60b1000-v1']
scan_type_pairs.update(scan1=pepolar,
scan2=dwi)
elif modality == 'functional':
fmri = ['ncanda-rsfmri-v1']
fieldmap = ['ncanda-grefieldmap-v1']
scan_type_pairs.update(scan1=fmri,
scan2=fieldmap)
return scan_type_pairs
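# Example of the pairing returned for the 'anatomy' modality (taken directly from the
# branches above, shown only as an illustration):
#
#   get_scan_type_pairs('anatomy')
#   -> {'scan1': ['ncanda-t1spgr-v1', 'ncanda-mprage-v1'],
#       'scan2': ['ncanda-t2fse-v1']}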
def main(args=None):
# TODO: Handle when T1 and T2 are in separate session (i.e., rescan)
# Upload all data experimentsdir
if args.update:
slog.init_log(False, False, 'xnat_sessions_report', 'xnat_sessions_report', None)
session = sibispy.Session()
if not session.configure():
if verbose:
print("Error: session configure file was not found")
sys.exit()
server = session.connect_server('xnat_http', True)
if not server:
print("Error: could not connect to xnat server!")
sys.exit()
xe.extract_experiment_xml(session,args.experimentsdir, args.num_extract)
# extract info from the experiment XML files
experiment = xe.get_experiments_dir_info(args.experimentsdir)
# Scan specific information
scan = xe.get_experiments_dir_scan_info(args.experimentsdir)
# Session info
reading = xe.get_experiments_dir_reading_info(args.experimentsdir)
df = xe.merge_experiments_scans_reading(experiment, scan, reading)
# exclude phantoms, including the traveling human phantoms
site_id_pattern = '[A-E]-[0-9]{5}-[MF]-[0-9]'
df = df[df.site_id.str.contains(site_id_pattern)]
# exclude subjects not part of study
df = df[df['subject_id'] != 'NCANDA_S00127']
if args.unknown :
print("Sessions that have not yet been quality controlled")
scanCheckList = pd.DataFrame()
required_scans = ['ncanda-mprage-v1','ncanda-t1spgr-v1','ncanda-t2fse-v1','ncanda-dti6b500pepolar-v1','ncanda-dti30b400-v1','ncanda-dti60b1000-v1','ncanda-grefieldmap-v1','ncanda-rsfmri-v1']
for eid in df.experiment_id.drop_duplicates():
eid_df = df[df.experiment_id == eid]
eid_df = eid_df[~pd.isnull(eid_df['quality'])]
if not len(eid_df[eid_df['quality'] != 'unknown']) :
print(eid)
else :
unknownScanDF = eid_df[eid_df['quality'] == 'unknown']
mandatoryCheck = unknownScanDF[unknownScanDF['scan_type'].isin(required_scans)]
if len(mandatoryCheck) :
scanCheckList = scanCheckList.append(mandatoryCheck)
print(" ")
print("Mandatory scans that have not yet been quality controlled (status unknown)")
if len(scanCheckList) :
pd.set_option('display.max_rows', len(scanCheckList))
print(scanCheckList['scan_type'])
sys.exit()
if args.ignore_window or args.session_notes or args.scan_notes :
if args.usable :
df = df[df['quality'] == 'usable']
columns = ['site_id', 'subject_id', 'experiment_id', 'experiment_date','excludefromanalysis']
if args.ignore_window or args.scan_notes :
columns = columns + ['scan_id', 'scan_type', 'quality']
if args.scan_notes :
columns = columns + [ 'scan_note']
if args.session_notes :
columns = columns + [ 'note' ]
result = df[columns]
# print result
else :
df.loc[:, 'experiment_date'] = df.experiment_date.astype('datetime64')
result = pd.DataFrame()
for subject_id in df.subject_id.drop_duplicates():
subject_df = df[df.subject_id == subject_id]
# find the earliest exam date for each given subject
grouping = subject_df.groupby('subject_id')
baseline_date = grouping['experiment_date'].nsmallest(1)
baseline_df = subject_df[subject_df.experiment_date == baseline_date[0]]
# Find window for follow-up
day_min = pd.datetools.Day(n=args.min)
day_max = pd.datetools.Day(n=args.max)
followup_min = baseline_df.experiment_date + day_min
followup_max = baseline_df.experiment_date + day_max
df_min = subject_df.experiment_date > followup_min[0]
df_max = subject_df.experiment_date < followup_max[0]
followup_df = subject_df[df_min & df_max]
# Include followup sessions slightly outside the window
included = ['NCANDA_E02615', 'NCANDA_E02860']
included_df = subject_df[subject_df.experiment_id.isin(included)]
if included_df.shape[0]:
followup_df = included_df
# Create report for baseline visit
if args.baseline:
followup_df = baseline_df
# filter for specific scan types
scan_type_pairs = get_scan_type_pairs(args.modality)
scan1 = scan_type_pairs.get('scan1')
scan2 = scan_type_pairs.get('scan2')
scan1_df = followup_df[followup_df.scan_type.isin(scan1)]
scan2_df = followup_df[followup_df.scan_type.isin(scan2)]
# Filter quality column
if args.usable :
scan1_selected = scan1_df[scan1_df.quality == 'usable']
scan2_selected = scan2_df[scan2_df.quality == 'usable']
else :
scan1_selected = scan1_df
scan2_selected = scan2_df
# report columns
columns = ['site_id', 'subject_id', 'experiment_id', 'experiment_date',
'excludefromanalysis', 'note', 'scan_type', 'quality',
'scan_note']
scan1_recs = scan1_selected.loc[:, columns].to_records(index=False)
scan2_recs = scan2_selected.loc[:, columns].to_records(index=False)
scan1_report = pd.DataFrame(scan1_recs,
index=scan1_selected.experiment_id)
scan2_report = pd.DataFrame(scan2_recs,
index=scan2_selected.experiment_id)
scan1_scan2_report = scan1_report.join(scan2_report[['scan_type',
'quality',
'scan_note']],
lsuffix='_scan1',
rsuffix='_scan2',
how='inner')
if scan1_scan2_report.shape[0]:
result = result.append(scan1_scan2_report)
#
# Write out results
#
# Remove any duplicate rows due to extra usable scan types (i.e., fieldmaps)
result = result.drop_duplicates()
result.to_csv(args.outfile, index=False)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(prog='xnat_sessions_report.py',
description=__doc__)
parser.add_argument('-c', '--config',
type=str,
default=os.path.join(os.path.expanduser('~'),
'.server_config', 'ncanda.cfg'))
parser.add_argument('-b', '--baseline',
action='store_true',
help='Create report for baseline visit.')
parser.add_argument('-e', '--experimentsdir',
type=str,
default='/tmp/experiments',
help='Name of experiments xml directory')
parser.add_argument('-m', '--modality',
type=str,
default='anatomy',
choices=['anatomy', 'diffusion', 'functional'],
help='Imaging modality to report on')
parser.add_argument('--min',
type=int,
default=180,
help='Minimum days from baseline (to specify followup '
'1y, only impacts final report but not -u option)')
parser.add_argument('--max',
type=int,
default=540,
help='Maximum days from baseline (to specify followup '
'1y, only impacts final report but not -u option)')
parser.add_argument('--ignore-window',
action='store_true',
help='Just list sessions regardless of window')
parser.add_argument('--usable',
action='store_true',
help='Only list scans with usable image quality')
parser.add_argument('--unknown',
action='store_true',
help='Only list sessions that have unknown scans, i.e. have not been reviewed')
parser.add_argument('--session-notes',
action='store_true',
help='create report with session notes')
parser.add_argument('--scan-notes',
action='store_true',
help='include scan notes in the report')
parser.add_argument('-o', '--outfile',
type=str,
default='/tmp/usability_report.csv',
help='Name of csv file to write.')
parser.add_argument('-n', '--num_extract',
type=int,
help='Number of sessions to extract (only works in '
'connection with -u)')
parser.add_argument('-u', '--update',
action='store_true',
help='Update the cache of xml files')
parser.add_argument('-v', '--verbose',
action='store_true',
help='Print verbose output.')
argv = parser.parse_args()
verbose = argv.verbose
xe.verbose = argv.verbose
sys.exit(main(args=argv))
|
sibis-platform/ncanda-data-integration
|
scripts/reporting/xnat_sessions_report.py
|
Python
|
bsd-3-clause
| 11,745
|
[
"VisIt"
] |
11924db1891618d2dfaaa732deb10615d7c600498aaa7dc38bfae243972d14a9
|
import numpy as np
import time
import pygmin.utils.readAmberParam as readAmb
import ambgmin_ as GMIN
import pygmin.potentials.gminpotential as gminpot
import pygmin.basinhopping as bh
from pygmin.storage import savenlowest
from pygmin.NEB import NEB
from pygmin.utils.rbtools import *
from pygmin.takestep import generic
from pygmin.takestep import displace
class molSystem:
def __init__(self):
self.storage = savenlowest.SaveN(10)
GMIN.initialize()
# self.bondList = bondList
def createBasinHopping(self):
GMIN.initialize()
pot = gminpot.GMINPotental(GMIN)
coords = pot.getCoords()
step = displace.RandomDisplacement()
opt = bh.BasinHopping(coords, pot, takeStep=step, temperature=0.4, storage=self.storage)
return opt
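# Illustrative sketch (not part of the original file): driving the basin hopping run
# outside the GUI. The step count is arbitrary and BasinHopping.run(nsteps) is assumed
# to be the pygmin API for taking nsteps Monte Carlo steps.
#
#   system = molSystem()
#   opt = system.createBasinHopping()
#   opt.run(100)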
def drawCylinder(self, X1, X2):
from OpenGL import GL,GLUT, GLU
z = np.array([0.,0.,1.]) #default cylinder orientation
p = X2-X1 #desired cylinder orientation
r = np.linalg.norm(p)
t = np.cross(z,p) #axis about which to rotate
a = np.arccos( np.dot( z,p) / r ) #rotation angle
a *= (180. / np.pi) #convert from radians to degrees
GL.glPushMatrix()
GL.glTranslate( X1[0], X1[1], X1[2] )
GL.glRotate( a, t[0], t[1], t[2] )
g=GLU.gluNewQuadric()
GLU.gluCylinder(g, .1,0.1,r,30,30) #I can't seem to draw a cylinder
GL.glPopMatrix()
def draw(self, coordsl, index):
from OpenGL import GL,GLUT
coords=coordsl.reshape(coordsl.size/3,3)
#coords = coords.reshape(GMIN.getNAtoms, 3)
com=np.mean(coords, axis=0)
for xx in coords:
x = xx-com
GL.glPushMatrix()
GL.glTranslate(x[0],x[1],x[2])
GLUT.glutSolidSphere(0.3,30,30)
GL.glPopMatrix()
# get bond list from amber params
mol = readAmb.readAmberParam()
mol.populateBondConn()
# draw bonds
for atomPairs in mol.bondConn:
xyz1 = coords[atomPairs[0]-1] - com
xyz2 = coords[atomPairs[1]-1] - com
self.drawCylinder(xyz1, xyz2)
def createNEB(self, coords1, coords2):
pot = gminpot.GMINPotental(GMIN)
return NEB.NEB(coords1, coords2, pot, k = 100. ,nimages=20)
if __name__ == "__main__":
import pygmin.gui.run as gr
gr.run_gui(molSystem)
|
js850/PyGMIN
|
examples/amber/metenk/run_gui.py
|
Python
|
gpl-3.0
| 2,507
|
[
"Amber"
] |
681272b3e1c4651aba04db353925d435270eee30a73e1ad52434082ca06c6492
|
from Sire.Mol import *
from Sire.IO import *
from Sire.Vol import *
from Sire.FF import *
from Sire.MM import *
from Sire.CAS import *
from Sire.Cluster import *
from Sire.Squire import *
from Sire.Maths import *
from Sire.Qt import *
from Sire.Units import *
import time
timer = QTime()
#read in all of the molecules
print("Loading the molecules...")
timer.start()
mols = PDB().read("test/io/water.pdb")
ms = timer.elapsed()
print("... took %d ms" % ms)
#specify the space in which the molecules are placed
space = Cartesian()
space = PeriodicBox(Vector(-18.3854,-18.66855,-18.4445), \
Vector( 18.3854, 18.66855, 18.4445))
#specify the type of switching function to use
switchfunc = HarmonicSwitchingFunction(80.0)
switchfunc = HarmonicSwitchingFunction(15.0, 14.5)
molproexe = "../../../../../software/molpro/devel/molpro"
#create a forcefield for the molecules
molproff1 = MolproFF( space, switchfunc )
molproff2 = MolproFF( space, switchfunc )
molproff3 = MolproFF( space, switchfunc )
molproff1.setMolproExe(molproexe)
molproff2.setMolproExe(molproexe)
molproff3.setMolproExe(molproexe)
#parameterise each molecule and add it to the forcefield
print("Parameterising the molecules...")
chgs = AtomicCharges( [0.0, 0.52 * mod_electron, \
0.52 * mod_electron, \
-1.04 * mod_electron] )
ljs = AtomicLJs( [ LJParameter( 3.15365 * angstrom, \
0.1550 * kcal_per_mol ), \
LJParameter.dummy(), \
LJParameter.dummy(), \
LJParameter.dummy() ] )
timer.start()
for mol in mols:
mol.setProperty( "charges", chgs )
mol.setProperty( "ljs", ljs )
qm_mol = mols[0]
mm_mols = mols[1:]
molproff1.addToMM(mm_mols)
molproff2.addToMM(mm_mols)
molproff3.addToMM(mm_mols)
molproff1.addToQM(qm_mol)
molproff2.addToQM(qm_mol)
molproff3.addToQM(qm_mol)
ms = timer.elapsed()
print("... took %d ms" % ms)
timer.start()
#create a thread processor and calculate the energy in the background
threadproc1 = FFThreadProcessor(molproff1)
threadproc2 = FFThreadProcessor(molproff2)
active_threadproc1 = threadproc1.activate()
active_threadproc2 = threadproc2.activate()
print("Starting background calculation...")
active_threadproc1.recalculateEnergy()
active_threadproc2.recalculateEnergy()
print("Off it goes....")
print("Da de da da da...")
#create an FFProcessor, and place the cljff onto it...
ffproc1 = FFProcessor(molproff3)
print("Is active?", ffproc1.isActive())
active_ffproc1 = ffproc1.activate()
print("Is active?", ffproc1.isActive())
print("MAIN THREAD PROCESS")
print("Total energy == ",active_threadproc1.energy())
print("Total energy == ",active_threadproc2.energy())
print("Total energy == ",active_ffproc1.energy())
print("Took %d ms" % timer.elapsed())
|
chryswoods/SireTests
|
unittests/Squire/threadmolpro.py
|
Python
|
gpl-2.0
| 2,844
|
[
"Molpro"
] |
8376436c730a5d72d29c8b4590fc0c11aaf096d873c749c07e465e7f3b786d3a
|
"""Quality control and summary metrics for next-gen alignments and analysis.
"""
import collections
import contextlib
import csv
import os
import shutil
import subprocess
import pandas as pd
import lxml.html
import yaml
from datetime import datetime
# allow graceful degradation during upgrades if matplotlib is unavailable
try:
import matplotlib
matplotlib.use('Agg', force=True)
import matplotlib.pyplot as plt
plt.ioff()
except ImportError:
plt = None
try:
from fadapa import Fadapa
except ImportError:
Fadapa = None
import pybedtools
import pysam
import toolz as tz
import toolz.dicttoolz as dtz
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils, run_info
from bcbio.install import _get_data_dir
from bcbio.provenance import do
import bcbio.rnaseq.qc
from bcbio.rnaseq.coverage import plot_gene_coverage
import bcbio.pipeline.datadict as dd
from bcbio.variation import bedutils
from bcbio import broad
# ## High level functions to generate summary
def generate_parallel(samples, run_parallel):
"""Provide parallel preparation of summary information for alignment and variant calling.
"""
sum_samples = run_parallel("pipeline_summary", samples)
qsign_info = run_parallel("qsignature_summary", [sum_samples])
summary_file = write_project_summary(sum_samples, qsign_info)
samples = []
for data in sum_samples:
if "summary" not in data[0]:
data[0]["summary"] = {}
data[0]["summary"]["project"] = summary_file
if qsign_info:
data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
samples.append(data)
samples = _add_researcher_summary(samples, summary_file)
return samples
def pipeline_summary(data):
"""Provide summary information on processing sample.
"""
work_bam = data.get("work_bam")
if data["sam_ref"] is not None and work_bam and work_bam.endswith(".bam"):
logger.info("Generating summary files: %s" % str(data["name"]))
data["summary"] = _run_qc_tools(work_bam, data)
return [[data]]
def prep_pdf(qc_dir, config):
"""Create PDF from HTML summary outputs in QC directory.
Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1
Thanks to: https://www.biostars.org/p/16991/
Works around issues with CSS conversion on CentOS by adjusting CSS.
"""
html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
try:
topdf = config_utils.get_program("wkhtmltopdf", config)
except config_utils.CmdNotFound:
topdf = None
if topdf and utils.file_exists(html_file):
out_file = "%s.pdf" % os.path.splitext(html_file)[0]
if not utils.file_exists(out_file):
cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
% (html_file, html_fixed))
do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
cmd = [topdf, html_fixed, out_file]
do.run(cmd, "Convert QC HTML to PDF")
return out_file
def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
metrics = {}
to_run = []
if "fastqc" not in tz.get_in(("config", "algorithm", "tools_off"), data, []):
to_run.append(("fastqc", _run_fastqc))
if data["analysis"].lower().startswith("rna-seq"):
# to_run.append(("rnaseqc", bcbio.rnaseq.qc.sample_summary))
# to_run.append(("coverage", _run_gene_coverage))
# to_run.append(("complexity", _run_complexity))
to_run.append(("qualimap", _rnaseq_qualimap))
elif data["analysis"].lower().startswith("chip-seq"):
to_run.append(["bamtools", _run_bamtools_stats])
else:
to_run += [("bamtools", _run_bamtools_stats), ("gemini", _run_gemini_stats)]
if data["analysis"].lower().startswith(("standard", "variant2")):
to_run.append(["qsignature", _run_qsignature_generator])
if "qualimap" in tz.get_in(("config", "algorithm", "tools_on"), data, []):
to_run.append(("qualimap", _run_qualimap))
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
for program_name, qc_fn in to_run:
cur_qc_dir = os.path.join(qc_dir, program_name)
cur_metrics = qc_fn(bam_file, data, cur_qc_dir)
metrics.update(cur_metrics)
ratio = bam.get_aligned_reads(bam_file, data)
# if (ratio < 0.60 and data['config']["algorithm"].get("kraken", None) and
# (data["analysis"].lower().startswith("rna-seq") or
# data["analysis"].lower().startswith("standard"))):
if data['config']["algorithm"].get("kraken", None):
cur_metrics = _run_kraken(data, ratio)
metrics.update(cur_metrics)
bam.remove("%s-downsample%s" % os.path.splitext(bam_file))
metrics["Name"] = data["name"][-1]
metrics["Quality format"] = utils.get_in(data,
("config", "algorithm",
"quality_format"),
"standard").lower()
return {"qc": qc_dir, "metrics": metrics}
# ## Generate project level QC summary for quickly assessing large projects
def write_project_summary(samples, qsign_info=None):
"""Write project summary information on the provided samples.
write out dirs, genome resources,
"""
work_dir = samples[0][0]["dirs"]["work"]
out_file = os.path.join(work_dir, "project-summary.yaml")
upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
if "dir" in samples[0][0]["upload"] else "")
test_run = samples[0][0].get("test_run", False)
date = str(datetime.now())
prev_samples = _other_pipeline_samples(out_file, samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump({"date": date}, out_handle,
default_flow_style=False, allow_unicode=False)
if test_run:
yaml.safe_dump({"test_run": True}, out_handle, default_flow_style=False,
allow_unicode=False)
if qsign_info:
qsign_out = utils.deepish_copy(qsign_info[0])
qsign_out.pop("out_dir", None)
yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False,
allow_unicode=False)
yaml.safe_dump({"upload": upload_dir}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
default_flow_style=False, allow_unicode=False)
return out_file
def _other_pipeline_samples(summary_file, cur_samples):
"""Retrieve samples produced previously by another pipeline in the summary output.
"""
cur_descriptions = set([s[0]["description"] for s in cur_samples])
out = []
if os.path.exists(summary_file):
with open(summary_file) as in_handle:
for s in yaml.load(in_handle).get("samples", []):
if s["description"] not in cur_descriptions:
out.append(s)
return out
def _save_fields(sample):
to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata",
"description"]
saved = {k: sample[k] for k in to_save if k in sample}
if "summary" in sample:
saved["summary"] = {"metrics": sample["summary"]["metrics"]}
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
saved["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
saved["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
saved["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return saved
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [-1, -1, -1]
with open(disambiguatestatsfilename, "r") as in_handle:
header = in_handle.readline().strip().split("\t")
if header == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']:
disambig_stats_tmp = in_handle.readline().strip().split("\t")[1:]
if len(disambig_stats_tmp) == 3:
disambig_stats = [int(x) for x in disambig_stats_tmp]
return disambig_stats
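# Expected layout of the disambiguation summary parsed above (tab-separated; the sample
# name and counts are made up for illustration):
#
#   sample    unique species A pairs    unique species B pairs    ambiguous pairs
#   sampleX   123456                    7890                      321
#
# for which _parse_disambiguate() returns [123456, 7890, 321].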
# ## Generate researcher specific summaries
def _add_researcher_summary(samples, summary_yaml):
"""Generate summary files per researcher if organized via a LIMS.
"""
by_researcher = collections.defaultdict(list)
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
by_researcher[researcher].append(data["description"])
out_by_researcher = {}
for researcher, descrs in by_researcher.items():
out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher,
set(descrs), samples[0][0])
out = []
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
data["summary"]["researcher"] = out_by_researcher[researcher]
out.append([data])
return out
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data):
"""Generate a CSV file with summary information for a researcher on this project.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")),
"%s-summary.tsv" % run_info.clean_name(researcher))
metrics = ["Total reads", "Mapped reads", "Mapped reads pct", "Duplicates", "Duplicates pct"]
with open(summary_yaml) as in_handle:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(["Name"] + metrics)
for sample in yaml.safe_load(in_handle)["samples"]:
if sample["description"] in descrs:
row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "")
for x in metrics]
writer.writerow(row)
return out_file
# ## Run and parse read information from FastQC
class FastQCParser:
def __init__(self, base_dir, sample=None):
self._dir = base_dir
self.sample = sample
def get_fastqc_summary(self):
ignore = set(["Total Sequences", "Filtered Sequences",
"Filename", "File type", "Encoding"])
stats = {}
for stat_line in self._fastqc_data_section("Basic Statistics")[1:]:
k, v = stat_line.split("\t")[:2]
if k not in ignore:
stats[k] = v
return stats
def _fastqc_data_section(self, section_name):
out = []
in_section = False
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file):
with open(data_file) as in_handle:
for line in in_handle:
if line.startswith(">>%s" % section_name):
in_section = True
elif in_section:
if line.startswith(">>END"):
break
out.append(line.rstrip("\r\n"))
return out
def save_sections_into_file(self):
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file) and Fadapa:
parser = Fadapa(data_file)
module = [m[1] for m in parser.summary()][2:9]
for m in module:
out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv")
dt = self._get_module(parser, m)
dt.to_csv(out_file, sep="\t", index=False)
def _get_module(self, parser, module):
"""
Get module using fadapa package
"""
dt = []
lines = parser.clean_data(module)
header = lines[0]
for data in lines[1:]:
if data[0].startswith("#"): #some modules have two headers
header = data
continue
if data[0].find("-") > -1: # expand positions 1-3 to 1, 2, 3
f, s = map(int, data[0].split("-"))
for pos in range(f, s):
dt.append([str(pos)] + data[1:])
else:
dt.append(data)
dt = pd.DataFrame(dt)
dt.columns = [h.replace(" ", "_") for h in header]
dt['sample'] = self.sample
return dt
def _run_gene_coverage(bam_file, data, out_dir):
out_file = os.path.join(out_dir, "gene_coverage.pdf")
ref_file = utils.get_in(data, ("genome_resources", "rnaseq", "transcripts"))
count_file = data["count_file"]
if utils.file_exists(out_file):
return out_file
with file_transaction(data, out_file) as tx_out_file:
plot_gene_coverage(bam_file, ref_file, count_file, tx_out_file)
return {"gene_coverage": out_file}
def _run_kraken(data, ratio):
"""Run kraken, generating report in specified directory and parsing metrics.
Uses only the first read of each pair.
"""
logger.info("Number of aligned reads < than 0.60 in %s: %s" % (str(data["name"]), ratio))
logger.info("Running kraken to determine contaminant: %s" % str(data["name"]))
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
kraken_out = os.path.join(qc_dir, "kraken")
out = out_stats = None
db = data['config']["algorithm"]["kraken"]
kraken_cmd = config_utils.get_program("kraken", data["config"])
if db == "minikraken":
db = os.path.join(_get_data_dir(), "genomes", "kraken", "minikraken")
else:
if not os.path.exists(db):
logger.info("kraken: no database found %s, skipping" % db)
return {"kraken_report": "null"}
if not os.path.exists(os.path.join(kraken_out, "kraken_out")):
work_dir = os.path.dirname(kraken_out)
utils.safe_makedir(work_dir)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
fn_file = data["files"][0]
if fn_file.endswith("bam"):
logger.info("kraken: need fasta files as input")
return {"kraken_report": "null"}
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
out = os.path.join(tx_tmp_dir, "kraken_out")
out_stats = os.path.join(tx_tmp_dir, "kraken_stats")
cat = "zcat" if fn_file.endswith(".gz") else "cat"
cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick "
"--preload --min-hits 2 "
"--threads {num_cores} "
"--out {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals())
do.run(cl, "kraken: %s" % data["name"][-1])
if os.path.exists(kraken_out):
shutil.rmtree(kraken_out)
shutil.move(tx_tmp_dir, kraken_out)
metrics = _parse_kraken_output(kraken_out, db, data)
return metrics
def _parse_kraken_output(out_dir, db, data):
"""Parse kraken stat info comming from stderr,
generating report with kraken-report
"""
in_file = os.path.join(out_dir, "kraken_out")
stat_file = os.path.join(out_dir, "kraken_stats")
out_file = os.path.join(out_dir, "kraken_summary")
kraken_cmd = config_utils.get_program("kraken-report", data["config"])
classify = unclassify = None
with open(stat_file, 'r') as handle:
for line in handle:
if line.find(" classified") > -1:
classify = line[line.find("(") + 1:line.find(")")]
if line.find(" unclassified") > -1:
unclassify = line[line.find("(") + 1:line.find(")")]
if os.path.getsize(in_file) > 0 and not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals())
do.run(cl, "kraken report: %s" % data["name"][-1])
kraken = {"kraken_clas": classify, "kraken_unclas": unclassify}
kraken_sum = _summarize_kraken(out_file)
kraken.update(kraken_sum)
return kraken
def _summarize_kraken(fn):
"""get the value at species level"""
kraken = {}
list_sp, list_value = [], []
with open(fn) as handle:
for line in handle:
cols = line.strip().split("\t")
sp = cols[5].strip()
if len(sp.split(" ")) > 1 and not sp.startswith("cellular"):
list_sp.append(sp)
list_value.append(cols[0])
kraken = {"kraken_sp": list_sp, "kraken_value": list_value}
return kraken
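# Sketch of a kraken-report line consumed above (tab-separated; only the first column,
# the percentage, and the sixth column, the taxon name, are used; single-word names and
# names starting with "cellular" are skipped). The values are illustrative only:
#
#   12.50<TAB>1250<TAB>1250<TAB>S<TAB>9606<TAB>Homo sapiens
#
# contributes "Homo sapiens" to kraken_sp and "12.50" to kraken_value.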
def _run_fastqc(bam_file, data, fastqc_out):
"""Run fastqc, generating report in specified directory and parsing metrics.
Downsamples to 10 million reads to avoid excessive processing times with large
files, unless we're running a Standard/QC pipeline.
Handles fastqc 0.11+, which uses a single HTML file, and older versions that use
a directory of files + images. The goal is to eventually move to 0.11+ only.
"""
sentry_file = os.path.join(fastqc_out, "fastqc_report.html")
if not os.path.exists(sentry_file):
work_dir = os.path.dirname(fastqc_out)
utils.safe_makedir(work_dir)
ds_bam = (bam.downsample(bam_file, data, 1e7)
if data.get("analysis", "").lower() not in ["standard"]
else None)
bam_file = ds_bam if ds_bam else bam_file
fastqc_name = os.path.splitext(os.path.basename(bam_file))[0]
num_cores = data["config"]["algorithm"].get("num_cores", 1)
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
cl = [config_utils.get_program("fastqc", data["config"]),
"-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", "bam", bam_file]
do.run(cl, "FastQC: %s" % data["name"][-1])
tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name)
tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name)
if os.path.exists("%s.zip" % tx_fastqc_out):
os.remove("%s.zip" % tx_fastqc_out)
if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file):
utils.safe_makedir(fastqc_out)
shutil.move(os.path.join(tx_fastqc_out, "fastqc_data.txt"), fastqc_out)
shutil.move(tx_combo_file, sentry_file)
elif not os.path.exists(sentry_file):
if os.path.exists(fastqc_out):
shutil.rmtree(fastqc_out)
shutil.move(tx_fastqc_out, fastqc_out)
parser = FastQCParser(fastqc_out, data["name"][-1])
stats = parser.get_fastqc_summary()
parser.save_sections_into_file()
return stats
def _run_complexity(bam_file, data, out_dir):
try:
import pandas as pd
import statsmodels.formula.api as sm
except ImportError:
return {"Unique Starts Per Read": "NA"}
SAMPLE_SIZE = 1000000
base, _ = os.path.splitext(os.path.basename(bam_file))
utils.safe_makedir(out_dir)
out_file = os.path.join(out_dir, base + ".pdf")
df = bcbio.rnaseq.qc.starts_by_depth(bam_file, data["config"], SAMPLE_SIZE)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tmp_out_file:
df.plot(x='reads', y='starts', title=bam_file + " complexity")
fig = plt.gcf()
fig.savefig(tmp_out_file)
print "file saved as", out_file
print "out_dir is", out_dir
return bcbio.rnaseq.qc.estimate_library_complexity(df)
# ## Qualimap
def _parse_num_pct(k, v):
num, pct = v.split(" / ")
return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()}
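# Worked example for the helper above (values are illustrative):
#
#   _parse_num_pct("Mapped reads", "9,000 / 90%")
#   -> {"Mapped reads": "9000", "Mapped reads pct": "90%"}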
def _parse_qualimap_globals(table):
"""Retrieve metrics of interest from globals table.
"""
out = {}
want = {"Mapped reads": _parse_num_pct,
"Duplication rate": lambda k, v: {k: v}}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col in want:
out.update(want[col](col, val))
return out
def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out
def _parse_qualimap_coverage(table):
"""Parse summary qualimap coverage metrics.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out
def _parse_qualimap_insertsize(table):
"""Parse insert size metrics.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Median":
out["Insert size (Median)"] = val
return out
def _parse_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
out = {}
parsers = {"Globals": _parse_qualimap_globals,
"Globals (inside of regions)": _parse_qualimap_globals_inregion,
"Coverage": _parse_qualimap_coverage,
"Coverage (inside of regions)": _parse_qualimap_coverage,
"Insert size": _parse_qualimap_insertsize,
"Insert size (inside of regions)": _parse_qualimap_insertsize}
root = lxml.html.parse(report_file).getroot()
for table in root.xpath("//div[@class='table-summary']"):
header = table.xpath("h3")[0].text
if header in parsers:
out.update(parsers[header](table))
return out
def _bed_to_bed6(orig_file, out_dir):
"""Convert bed to required bed6 inputs.
"""
bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file)))
if not utils.file_exists(bed6_file):
with open(bed6_file, "w") as out_handle:
for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
region = [x for x in list(region) if x]
fillers = [str(i), "1.0", "+"]
full = region + fillers[:6 - len(region)]
out_handle.write("\t".join(full) + "\n")
return bed6_file
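# Illustration of the conversion above: a minimal 3-column BED record gets padded with a
# generated name (its row index), a placeholder score and a strand (example values only):
#
#   chr1<TAB>100<TAB>200   ->   chr1<TAB>100<TAB>200<TAB>0<TAB>1.0<TAB>+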
def _run_qualimap(bam_file, data, out_dir):
"""Run qualimap to assess alignment quality metrics.
"""
report_file = os.path.join(out_dir, "qualimapReport.html")
if not os.path.exists(report_file):
ds_bam = bam.downsample(bam_file, data, 1e7)
bam_file = ds_bam if ds_bam else bam_file
utils.safe_makedir(out_dir)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
qualimap = config_utils.get_program("qualimap", data["config"])
resources = config_utils.get_resources("qualimap", data["config"])
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
num_cores)
cmd = ("unset DISPLAY && {qualimap} bamqc -bam {bam_file} -outdir {out_dir} "
"-nt {num_cores} --java-mem-size={max_mem}")
species = data["genome_resources"]["aliases"].get("ensembl", "").upper()
if species in ["HUMAN", "MOUSE"]:
cmd += " -gd {species}"
regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data)
if regions:
bed6_regions = _bed_to_bed6(regions, out_dir)
cmd += " -gff {bed6_regions}"
do.run(cmd.format(**locals()), "Qualimap: %s" % data["name"][-1])
return _parse_qualimap_metrics(report_file)
# ## RNAseq Qualimap
def _parse_metrics(metrics):
# skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)
missing = set(["Genes Detected", "Transcripts Detected",
"Mean Per Base Cov."])
correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
"Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate",
"Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0,
"No feature assigned": 0, "Duplication Rate of Mapped": 1,
"Fragment Length Mean": 1,
"rRNA": 1, "Ambiguou alignment": 0})
total = ["Not aligned", "Aligned to genes", "No feature assigned"]
out = {}
total_reads = sum([int(metrics[name]) for name in total])
out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads
out['Mapped'] = sum([int(metrics[name]) for name in total[1:]])
out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads
[out.update({name: 0}) for name in missing]
[metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct]
for name in to_change:
if not to_change[name]:
continue
if to_change[name] == 1:
out.update({name: float(metrics[name])})
else:
out.update({to_change[name]: float(metrics[name])})
return out
def _detect_duplicates(bam_file, out_dir, config):
"""
Detect duplicate metrics with Picard
"""
out_file = os.path.join(out_dir, "dup_metrics")
if not utils.file_exists(out_file):
broad_runner = broad.runner_from_config(config)
(dup_align_bam, metrics_file) = broad_runner.run_fn("picard_mark_duplicates", bam_file, remove_dups=True)
shutil.move(metrics_file, out_file)
metrics = []
with open(out_file) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if line and not line[0].startswith("#"):
metrics.append(line)
metrics = dict(zip(metrics[0], metrics[1]))
return {"Duplication Rate of Mapped": metrics["PERCENT_DUPLICATION"]}
def _transform_browser_coor(rRNA_interval, rRNA_coor):
"""
transform interval format to browser coord: chr:start-end
"""
with open(rRNA_coor, 'w') as out_handle:
with open(rRNA_interval, 'r') as in_handle:
for line in in_handle:
c, bio, source, s, e = line.split("\t")[:5]
if bio.startswith("rRNA"):
out_handle.write(("{0}:{1}-{2}\n").format(c, s, e))
def _detect_rRNA(config, bam_file, rRNA_file, ref_file, out_dir, single_end):
"""
Calculate rRNA with gatk-framework
"""
if not utils.file_exists(rRNA_file):
return {'rRNA': 0}
out_file = os.path.join(out_dir, "rRNA.counts")
if not utils.file_exists(out_file):
out_file = _count_rRNA_reads(bam_file, out_file, ref_file, rRNA_file, single_end, config)
with open(out_file) as in_handle:
for line in in_handle:
if line.find("CountReads counted") > -1:
rRNA_reads = line.split()[6]
break
return {'rRNA': rRNA_reads}
def _count_rRNA_reads(in_bam, out_file, ref_file, rRNA_interval, single_end, config):
"""Use GATK counter to count reads in rRNA genes
"""
bam.index(in_bam, config)
if not utils.file_exists(out_file):
with file_transaction(out_file) as tx_out_file:
rRNA_coor = os.path.join(os.path.dirname(out_file), "rRNA.list")
_transform_browser_coor(rRNA_interval, rRNA_coor)
params = ["-T", "CountReads",
"-R", ref_file,
"-I", in_bam,
"-log", tx_out_file,
"-L", rRNA_coor,
"--filter_reads_with_N_cigar",
"-allowPotentiallyMisencodedQuals"]
jvm_opts = broad.get_gatk_framework_opts(config)
cmd = [config_utils.get_program("gatk-framework", config)] + jvm_opts + params
do.run(cmd, "counts rRNA for %s" % in_bam)
return out_file
def _parse_qualimap_rnaseq(table):
"""
Retrieve metrics of interest from globals table.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
col = col.replace(":", "").strip()
val = val.replace(",", "")
m = {col: val}
if val.find("/") > -1:
m = _parse_num_pct(col, val.replace("%", ""))
out.update(m)
return out
def _parse_rnaseq_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
out = {}
parsers = ["Reads alignment", "Reads genomic origin", "Transcript coverage profile"]
root = lxml.html.parse(report_file).getroot()
for table in root.xpath("//div[@class='table-summary']"):
header = table.xpath("h3")[0].text
if header in parsers:
out.update(_parse_qualimap_rnaseq(table))
return out
def _rnaseq_qualimap(bam_file, data, out_dir):
"""
Run qualimap for a rnaseq bam file and parse results
"""
report_file = os.path.join(out_dir, "qualimapReport.html")
config = data["config"]
gtf_file = dd.get_gtf_file(data)
ref_file = dd.get_ref_file(data)
single_end = not bam.is_paired(bam_file)
if not utils.file_exists(report_file):
utils.safe_makedir(out_dir)
bam.index(bam_file, config)
cmd = _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file, single_end)
do.run(cmd, "Qualimap for {}".format(data["name"][-1]))
metrics = _parse_rnaseq_qualimap_metrics(report_file)
metrics.update(_detect_duplicates(bam_file, out_dir, config))
metrics.update(_detect_rRNA(config, bam_file, gtf_file, ref_file, out_dir, single_end))
metrics.update({"Fragment Length Mean": bam.estimate_fragment_size(bam_file)})
metrics = _parse_metrics(metrics)
return metrics
def _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file=None, single_end=None):
"""
Create command lines for qualimap
"""
qualimap = config_utils.get_program("qualimap", config)
resources = config_utils.get_resources("qualimap", config)
num_cores = resources.get("cores", 1)
max_mem = config_utils.adjust_memory(resources.get("memory", "4G"),
num_cores)
cmd = ("unset DISPLAY && {qualimap} rnaseq -outdir {out_dir} -a proportional -bam {bam_file} "
"-gtf {gtf_file} --java-mem-size={max_mem}").format(**locals())
return cmd
# ## Lightweight QC approaches
def _parse_bamtools_stats(stats_file):
out = {}
want = set(["Total reads", "Mapped reads", "Duplicates", "Median insert size"])
with open(stats_file) as in_handle:
for line in in_handle:
parts = line.split(":")
if len(parts) == 2:
metric, stat_str = parts
metric = metric.split("(")[0].strip()
if metric in want:
stat_parts = stat_str.split()
if len(stat_parts) == 2:
stat, pct = stat_parts
pct = pct.replace("(", "").replace(")", "")
else:
stat = stat_parts[0]
pct = None
out[metric] = stat
if pct:
out["%s pct" % metric] = pct
return out
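# Example of `bamtools stats` lines parsed above (numbers are illustrative):
#
#   Total reads:       1000000
#   Mapped reads:      950000    (95%)
#
# yields {"Total reads": "1000000", "Mapped reads": "950000", "Mapped reads pct": "95%"}.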
def _parse_offtargets(bam_file):
"""
Add off-target read metrics if the corresponding stats file exists
"""
off_target = bam_file.replace(".bam", "-offtarget-stats.yaml")
if os.path.exists(off_target):
res = yaml.load(open(off_target))
return res
return {}
def _run_bamtools_stats(bam_file, data, out_dir):
"""Run bamtools stats with reports on mapped reads, duplicates and insert sizes.
"""
stats_file = os.path.join(out_dir, "bamtools_stats.txt")
if not utils.file_exists(stats_file):
utils.safe_makedir(out_dir)
bamtools = config_utils.get_program("bamtools", data["config"])
with file_transaction(data, stats_file) as tx_out_file:
cmd = "{bamtools} stats -in {bam_file}"
if bam.is_paired(bam_file):
cmd += " -insert"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "bamtools stats", data)
out = _parse_bamtools_stats(stats_file)
out.update(_parse_offtargets(bam_file))
return out
## Variant statistics from gemini
def _run_gemini_stats(bam_file, data, out_dir):
"""Retrieve high level variant statistics from Gemini.
"""
out = {}
gemini_dbs = [d for d in
[tz.get_in(["population", "db"], x) for x in data.get("variants", [])] if d]
if len(gemini_dbs) > 0:
gemini_db = gemini_dbs[0]
gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0]
if not utils.file_uptodate(gemini_stat_file, gemini_db):
gemini = config_utils.get_program("gemini", data["config"])
tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db])
gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db])
dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q",
"SELECT count(*) FROM variants WHERE in_dbsnp==1"])
out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1]
for line in gt_counts.split("\n"):
parts = line.rstrip().split()
if len(parts) > 0 and parts[0] != "sample":
name, hom_ref, het, hom_var, _, total = parts
out[name] = {}
out[name]["Variations (heterozygous)"] = int(het)
out[name]["Variations (homozygous)"] = int(hom_var)
# same total variations for all samples, keep that top level as well.
out["Variations (total)"] = int(total)
out["Variations (in dbSNP)"] = int(dbsnp_count.strip())
if out.get("Variations (total)") > 0:
out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] /
float(out["Variations (total)"]) * 100.0)
with open(gemini_stat_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
else:
with open(gemini_stat_file) as in_handle:
out = yaml.safe_load(in_handle)
res = {}
for k, v in out.iteritems():
if not isinstance(v, dict):
res.update({k: v})
if k == data['name'][-1]:
res.update(v)
return res
## qsignature
def _run_qsignature_generator(bam_file, data, out_dir):
""" Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary
:param bam_file: (str) path of the bam_file
:param data: (list) list containing the all the dictionary
for this sample
:param out_dir: (str) path of the output
:returns: (dict) dict with the normalize vcf file
"""
position = dd.get_qsig_file(data)
mixup_check = dd.get_mixup_check(data)
if mixup_check and mixup_check.startswith("qsignature"):
if not position:
logger.info("There is no qsignature for this species: %s"
% tz.get_in(['genome_build'], data))
return {}
jvm_opts = "-Xms750m -Xmx2g"
limit_reads = 20000000
if mixup_check == "qsignature_full":
slice_bam = bam_file
jvm_opts = "-Xms750m -Xmx8g"
limit_reads = 100000000
else:
slice_bam = _slice_chr22(bam_file, data)
qsig = config_utils.get_program("qsignature", data["config"])
if not qsig:
return {}
utils.safe_makedir(out_dir)
out_name = os.path.basename(slice_bam).replace("bam", "qsig.vcf")
out_file = os.path.join(out_dir, out_name)
log_file = os.path.join(out_dir, "qsig.log")
cores = dd.get_cores(data)
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureGenerator "
"--noOfThreads {cores} "
"-log {log_file} -i {position} "
"-i {down_file} ")
if not os.path.exists(out_file):
down_file = bam.downsample(slice_bam, data, limit_reads)
if not down_file:
down_file = slice_bam
file_qsign_out = "{0}.qsig.vcf".format(down_file)
do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % data["name"][-1])
if os.path.exists(file_qsign_out):
with file_transaction(data, out_file) as file_txt_out:
shutil.move(file_qsign_out, file_txt_out)
else:
raise IOError("File doesn't exist %s" % file_qsign_out)
return {'qsig_vcf': out_file}
return {}
def qsignature_summary(*samples):
"""Run SignatureCompareRelatedSimple module from qsignature tool.
Creates a matrix of pairwise comparison among samples. The
function will not run if the output exists
:param samples: list with only one element containing all samples information
:returns: (dict) with the path of the output to be joined to summary
"""
warnings, similar = [], []
qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
if not qsig:
return [[]]
jvm_opts = "-Xms750m -Xmx8g"
work_dir = samples[0][0]["dirs"]["work"]
count = 0
for data in samples:
data = data[0]
vcf = tz.get_in(["summary", "metrics", "qsig_vcf"], data)
if vcf:
count += 1
vcf_name = data["name"][-1] + ".qsig.vcf"
out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
if not os.path.lexists(os.path.join(out_dir, vcf_name)):
os.symlink(vcf, os.path.join(out_dir, vcf_name))
if count > 0:
qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
out_file = os.path.join(qc_out_dir, "qsignature.xml")
out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
if not os.path.exists(out_file):
with file_transaction(samples[0][0], out_file) as file_txt_out:
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureCompareRelatedSimple "
"-log {log} -dir {out_dir} "
"-o {file_txt_out} ")
do.run(base_cmd.format(**locals()), "qsignature score calculation")
error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
out_warn_file, samples[0][0])
return [{'total samples': count,
'similar samples pairs': len(similar),
'warnings samples pairs': len(warnings),
'error samples': list(error),
'out_dir': qc_out_dir}]
else:
return []
def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
:returns: (list) with samples that could be duplicated
"""
name = {}
error, warnings, similar = set(), set(), set()
same, replicate, related = 0, 0.1, 0.18
mixup_check = dd.get_mixup_check(data)
if mixup_check == "qsignature_full":
same, replicate, related = 0, 0.01, 0.061
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = lxml.etree.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
for i in list(et.iter('comparison')):
msg = None
pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write("%s\t%s\t%s\n" %
(name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair)
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair)
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair)
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair)
return error, warnings, similar
def _slice_chr22(in_bam, data):
"""
Return a BAM file containing only chromosome 22
"""
sambamba = config_utils.get_program("sambamba", data["config"])
out_file = "%s-chr%s" % os.path.splitext(in_bam)
if not utils.file_exists(out_file):
bam.index(in_bam, data['config'])
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
chromosome = "22"
if "chr22" in bam_contigs:
chromosome = "chr22"
with file_transaction(data, out_file) as tx_out_file:
cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return out_file
|
verdurin/bcbio-nextgen
|
bcbio/pipeline/qcsummary.py
|
Python
|
mit
| 43,958
|
[
"pysam"
] |
3fcf43ec1eef81f3e7a453c57c7c2b4b0570bf648f5f624b6fa3f973272d7644
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
from PyQt5 import QtCore, QtWidgets
import peacock
from .PostprocessorPlugin import PostprocessorPlugin
class AxesSettingsPlugin(QtWidgets.QGroupBox, PostprocessorPlugin):
"""
Widget for controlling global axes settings.
Args:
axes[pyplot.Axes]: The axes object to apply the settings.
args[tuple]: Passed to QtWidgets.QGroupBox widget
Kwargs:
key, value pairs are passed to MooseWidget object.
"""
#: list: List of all possible legend locations
legend_loc = ['best', 'upper right', 'upper left', 'lower left', 'lower right', 'right', 'center left', 'center right', 'lower center', 'upper center', 'center']
#: pyqtSignal: Should be emitted when the axes have been modified.
axesModified = QtCore.pyqtSignal()
def __init__(self, *args, **kwargs):
peacock.base.MooseWidget.__init__(self)
QtWidgets.QWidget.__init__(self, *args)
PostprocessorPlugin.__init__(self, **kwargs)
self.MainLayout = QtWidgets.QVBoxLayout()
self.setLayout(self.MainLayout)
# Title
self.TitleLayout = QtWidgets.QHBoxLayout()
self.TitleLabel = QtWidgets.QLabel('Title:')
self.Title = QtWidgets.QLineEdit()
self.TitleLayout.addWidget(self.TitleLabel)
self.TitleLayout.addWidget(self.Title)
# Legend Toggles
self.LegendLayout = QtWidgets.QGridLayout()
self.Legend = QtWidgets.QCheckBox('Legend (Left) ')
self.LegendLocation = QtWidgets.QComboBox()
self.Legend2 = QtWidgets.QCheckBox('Legend (Right)')
self.Legend2Location = QtWidgets.QComboBox()
self.LegendLayout.addWidget(self.Legend, 0, 0)
self.LegendLayout.addWidget(self.LegendLocation, 0, 1)
self.LegendLayout.addWidget(self.Legend2, 1, 0)
self.LegendLayout.addWidget(self.Legend2Location, 1, 1)
self.MainLayout.addLayout(self.TitleLayout)
self.MainLayout.addLayout(self.LegendLayout)
self.setup()
def onSetData(self, data):
"""
Initialize the widget.
"""
self.onAxesModified()
@QtCore.pyqtSlot()
def onAxesModified(self):
"""
Updates the Axes with the settings from this widget.
"""
# Legend helper function
def setup_legend(box, loc, axes):
has_data = axes.has_data()
box.setEnabled(has_data)
checked = box.isChecked()
loc.setEnabled(checked and has_data)
if has_data and checked:
legend = axes.legend(loc=loc.currentText())
legend.set_visible(True)
return
legend = axes.get_legend()
if legend:
legend.set_visible(False)
# Legends
setup_legend(self.Legend, self.LegendLocation, self.axes(0))
setup_legend(self.Legend2, self.Legend2Location, self.axes(1))
# Title
self.axes(0).set_title(self.Title.text())
self.axesModified.emit()
def repr(self):
"""
Returns a representation of this widget for use in python script.
"""
output = []
if self.Legend.isChecked():
loc = self.legend_loc[self.LegendLocation.currentIndex()]
output += ["axes0.legend(loc={})".format(repr(loc))]
if self.Legend2.isChecked():
loc = self.legend_loc[self.Legend2Location.currentIndex()]
output += ["axes1.legend(loc={})".format(repr(loc))]
title = str(self.Title.text())
if title:
output += ["axes0.set_title({})".format(repr(title))]
if output:
output.insert(0, '\n# Axes Settings')
return output, []
def _setupTitle(self, qobject):
"""
Setup method for Title widget.
"""
qobject.editingFinished.connect(self.onAxesModified)
def _setupLegend(self, qobject):
"""
Setup method for left-hand legend toggle widget.
"""
qobject.clicked.connect(self.onAxesModified)
def _setupLegend2(self, qobject):
"""
Setup method of right-hand legend.
"""
qobject.clicked.connect(self.onAxesModified)
def _setupLegendLocation(self, qobject):
"""
Setup method for legend (left) position widget.
"""
qobject.setEnabled(False)
for loc in self.legend_loc:
qobject.addItem(loc)
qobject.currentIndexChanged.connect(self.onAxesModified)
def _setupLegend2Location(self, qobject):
"""
Setup method for legend (right) position widget.
"""
qobject.setEnabled(False)
for loc in self.legend_loc:
qobject.addItem(loc)
qobject.currentIndexChanged.connect(self.onAxesModified)
def main(filenames):
from ..PostprocessorViewer import PostprocessorViewer
from .FigurePlugin import FigurePlugin
from .PostprocessorSelectPlugin import PostprocessorSelectPlugin
import mooseutils
import matplotlib
matplotlib.rcParams["figure.figsize"] = (6.25, 6.25)
matplotlib.rcParams["figure.dpi"] = (100)
widget = PostprocessorViewer(mooseutils.PostprocessorReader, timeout=None, plugins=[FigurePlugin, AxesSettingsPlugin, PostprocessorSelectPlugin])
widget.onSetFilenames(filenames)
control = widget.currentWidget().AxesSettingsPlugin
window = widget.currentWidget().FigurePlugin
window.setFixedSize(QtCore.QSize(625, 625))
widget.show()
return control, widget, window
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
control, widget, window = main(['../../../tests/input/white_elephant_jan_2016.csv'])
sys.exit(app.exec_())
|
harterj/moose
|
python/peacock/PostprocessorViewer/plugins/AxesSettingsPlugin.py
|
Python
|
lgpl-2.1
| 6,046
|
[
"MOOSE"
] |
3689f1a458896c897dbcbb7b04c82b314841ad175b438acf38a1f7ca4595a6bd
|
#!/usr/bin/env python2
desc="""Filter pairs with at least one read aligned.
"""
epilog="""Author: [email protected]
Mizerow, 25/06/2014
"""
import os, sys
import pysam
from datetime import datetime
from Bio.Seq import Seq
def pair2interleaved_fasta(pair):
"""Report interleaved fasta."""
return "".join(">%s/%s\n%s\n"%(a.qname, i, a.seq) for i, a in enumerate(pair, 1))
def sam2one_aligned(samfn, out, pair2out, verbose):
"""Parse SAM algs and report pairs with at least on read aligned."""
k = 0
if samfn=="-":
sam = pysam.Samfile(samfn, "r")
else:
sam = pysam.Samfile(samfn)
pair = []
for i, a in enumerate(sam, 1):
if verbose and not i%1e5:
sys.stderr.write(" %s %s\r"%(i, k))
#skip if both unmapped or if secondary or supplementary alg
if a.is_unmapped and a.mate_is_unmapped or a.is_secondary or a.flag & 2048:
continue
#reverse complement
if a.is_reverse:
a.seq, a.qual = str(Seq(a.seq).reverse_complement()), a.qual[::-1]
#count correct
k += 1
pair.append(a)
#report
if len(pair)==2:
out.write(pair2out(pair))
pair = []
#report stats
info = "%s algs processed. %s [%.2f%s] reads reported.\n"
sys.stderr.write(info%(i, k, k*100.0/i, '%'))
def main():
import argparse
usage = "bwa mem REF read1 read2 | %(prog)s -v"
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='1.0b')
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose")
parser.add_argument("-i", "--sam", default="-",
help="input SAM/BAM stream [stdin]")
parser.add_argument("-o", "--output", default=sys.stdout, type=argparse.FileType('w'),
help="output stream [stdout]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
#define function converting to output format
pair2out = pair2interleaved_fasta
#process
sam2one_aligned(o.sam, o.output, pair2out, o.verbose)
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n"%dt)
|
lpryszcz/bin
|
sam2one_aligned.py
|
Python
|
gpl-3.0
| 2,667
|
[
"BWA",
"pysam"
] |
c39b1c97c46101f29ba9b78cebf9233386e4e20638eab5deae0e48a7a756a6d9
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i, h2o_exec as h2e
def define_params():
paramDict = {
'standardize': [None, 0,1],
'beta_epsilon': [None, 0.0001],
'family': [None, 'gaussian', 'binomial', 'poisson'],
'lambda': [0,1e-8,1e-4,1e-3],
'alpha': [0,0.8,0.75],
'ignored_cols': [1,'C1','1,2','C1,C2'],
'max_iter': [None, 10],
'higher_accuracy': [None, 0, 1],
'use_all_factor_levels': [None, 0, 1],
'lambda_search': [None, 0], # FIX! what if lambda is set when lambda_search=1
'tweedie_variance_power': [None, 0, 1],
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_lambda_search(self):
csvPathname = 'covtype/covtype.20k.data'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put', hex_key="covtype.20k")
CLASS = 1
# make a binomial version
execExpr="B.hex=%s; B.hex[,%s]=(B.hex[,%s]==%s)" % ('covtype.20k', 54+1, 54+1, CLASS)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
paramDict = define_params()
for trial in range(8):
params = {}
colX = h2o_glm.pickRandGlmParams(paramDict, params)
# override choices with these
params = {
'response': 54,
'alpha': 0.1,
'max_iter': 8,
# 'lambda': 1e-4,
# 'lambda': 0,
'lambda': None,
'lambda_search': 1,
'n_folds': 1,
}
kwargs = params.copy()
if 'family' not in kwargs or kwargs['family']=='binomial':
bHack = {'destination_key': 'B.hex'}
else:
bHack = parseResult
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=300, parseResult=bHack, **kwargs)
# pass the kwargs with all the params, so we know what we asked for!
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
h2o.check_sandbox_for_errors()
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
|
111t8e/h2o-2
|
py/testdir_single_jvm/test_GLM2_lambda_search.py
|
Python
|
apache-2.0
| 2,630
|
[
"Gaussian"
] |
69c14962cd2b5df0f586b13f9e95685a730cb6473ff39263b8d4d04126327be3
|
#
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import os
import math
import time
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
diffConst = 1e-11
chemdt = 0.001 # Tested various dts, this is reasonable.
diffdt = 0.001
plotdt = 0.01
animationdt = 0.01
runtime = 1
useGssa = True
sdir = os.path.dirname( __file__ )
def makeModel():
model = moose.Neutral( '/model' )
# Make neuronal model. It has no channels, just for geometry
cell = moose.loadModel( os.path.join(sdir,'./spinyNeuron.p'), '/model/cell', 'Neutral' )
print(cell)
# We don't want the cell to do any calculations. Disable everything.
for i in moose.wildcardFind( '/model/cell/##' ):
i.tick = -1
# create container for model
model = moose.element( '/model' )
chem = moose.Neutral( '/model/chem' )
    # The naming of the compartments is dictated by the places that the
# chem model expects to be loaded.
compt0 = moose.NeuroMesh( '/model/chem/compt0' )
compt0.separateSpines = 1
compt0.geometryPolicy = 'cylinder'
compt1 = moose.SpineMesh( '/model/chem/compt1' )
moose.connect( compt0, 'spineListOut', compt1, 'spineList', 'OneToOne' )
compt2 = moose.PsdMesh( '/model/chem/compt2' )
moose.connect( compt0, 'psdListOut', compt2, 'psdList', 'OneToOne' )
#reacSystem = moose.loadModel( 'simpleOsc.g', '/model/chem', 'ee' )
makeChemModel( compt0, True ) # Populate all 3 compts with the chem system.
makeChemModel( compt1, False )
makeChemModel( compt2, True )
compt0.diffLength = 2e-6 # This will be over 100 compartments.
# This is the magic command that configures the diffusion compartments.
compt0.subTreePath = cell.path + "/#"
moose.showfields( compt0 )
# Build the solvers. No need for diffusion in this version.
ksolve0 = moose.Ksolve( '/model/chem/compt0/ksolve' )
if useGssa:
ksolve1 = moose.Gsolve( '/model/chem/compt1/ksolve' )
ksolve2 = moose.Gsolve( '/model/chem/compt2/ksolve' )
else:
ksolve1 = moose.Ksolve( '/model/chem/compt1/ksolve' )
ksolve2 = moose.Ksolve( '/model/chem/compt2/ksolve' )
dsolve0 = moose.Dsolve( '/model/chem/compt0/dsolve' )
dsolve1 = moose.Dsolve( '/model/chem/compt1/dsolve' )
dsolve2 = moose.Dsolve( '/model/chem/compt2/dsolve' )
stoich0 = moose.Stoich( '/model/chem/compt0/stoich' )
stoich1 = moose.Stoich( '/model/chem/compt1/stoich' )
stoich2 = moose.Stoich( '/model/chem/compt2/stoich' )
# Configure solvers
stoich0.compartment = compt0
stoich1.compartment = compt1
stoich2.compartment = compt2
stoich0.ksolve = ksolve0
stoich1.ksolve = ksolve1
stoich2.ksolve = ksolve2
stoich0.dsolve = dsolve0
stoich1.dsolve = dsolve1
stoich2.dsolve = dsolve2
stoich0.reacSystemPath = '/model/chem/compt0/#'
stoich1.reacSystemPath = '/model/chem/compt1/#'
stoich2.reacSystemPath = '/model/chem/compt2/#'
assert( stoich0.numVarPools == 1 )
assert( stoich0.numProxyPools == 0 )
assert( stoich0.numRates == 1 )
assert( stoich1.numVarPools == 1 )
assert( stoich1.numProxyPools == 0 )
if useGssa:
assert( stoich1.numRates == 2 )
assert( stoich2.numRates == 2 )
else:
assert( stoich1.numRates == 1 )
assert( stoich2.numRates == 1 )
assert( stoich2.numVarPools == 1 )
assert( stoich2.numProxyPools == 0 )
dsolve0.buildNeuroMeshJunctions( dsolve1, dsolve2 )
#stoich0.buildXreacs( stoich1 )
#stoich1.buildXreacs( stoich2 )
#stoich0.filterXreacs()
#stoich1.filterXreacs()
#stoich2.filterXreacs()
Ca_input_dend = moose.vec( '/model/chem/compt0/Ca_input' )
print(len( Ca_input_dend ))
for i in range( 60 ):
Ca_input_dend[ 3 + i * 3 ].conc = 2.0
Ca_input_PSD = moose.vec( '/model/chem/compt2/Ca_input' )
print((len( Ca_input_PSD )))
for i in range( 5 ):
Ca_input_PSD[ 2 + i * 2].conc = 1.0
# Create the output tables
num = compt0.numDiffCompts - 1
graphs = moose.Neutral( '/model/graphs' )
makeTab( 'Ca_soma', '/model/chem/compt0/Ca[0]' )
makeTab( 'Ca_d1', '/model/chem/compt0/Ca[1]' )
makeTab( 'Ca_d2', '/model/chem/compt0/Ca[2]' )
makeTab( 'Ca_d3', '/model/chem/compt0/Ca[3]' )
makeTab( 'Ca_s3', '/model/chem/compt1/Ca[3]' )
makeTab( 'Ca_s4', '/model/chem/compt1/Ca[4]' )
makeTab( 'Ca_s5', '/model/chem/compt1/Ca[5]' )
makeTab( 'Ca_p3', '/model/chem/compt2/Ca[3]' )
makeTab( 'Ca_p4', '/model/chem/compt2/Ca[4]' )
makeTab( 'Ca_p5', '/model/chem/compt2/Ca[5]' )
def makeTab( plotname, molpath ):
tab = moose.Table2( '/model/graphs/' + plotname ) # Make output table
# connect up the tables
moose.connect( tab, 'requestOut', moose.element( molpath ), 'getConc' );
def makeDisplay():
plt.ion()
fig = plt.figure( figsize=(10,12) )
dend = fig.add_subplot( 411 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Dend voxel #' )
plt.legend()
timeLabel = plt.text(200, 0.5, 'time = 0')
spine = fig.add_subplot( 412 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Spine voxel #' )
plt.legend()
psd = fig.add_subplot( 413 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'PSD voxel #' )
plt.legend()
timeSeries = fig.add_subplot( 414 )
timeSeries.set_ylim( 0, 2 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
plt.legend()
Ca = moose.vec( '/model/chem/compt0/Ca' )
Ca_input = moose.vec( '/model/chem/compt0/Ca_input' )
line1, = dend.plot( list(range( len( Ca ))), Ca.conc, label='Ca' )
line2, = dend.plot( list(range( len( Ca_input ))), Ca_input.conc, label='Ca_input' )
dend.set_ylim( 0, 2 )
Ca = moose.vec( '/model/chem/compt1/Ca' )
line3, = spine.plot( list(range( len( Ca ))), Ca.conc, label='Ca' )
spine.set_ylim( 0, 1 )
Ca = moose.vec( '/model/chem/compt2/Ca' )
Ca_input = moose.vec( '/model/chem/compt2/Ca_input' )
line4, = psd.plot( list(range( len( Ca ))), Ca.conc, label='Ca' )
line5, = psd.plot( list(range( len( Ca_input ))), Ca_input.conc, label='Ca_input' )
psd.set_ylim( 0, 1 )
fig.canvas.draw()
return ( timeSeries, dend, spine, psd, fig, line1, line2, line3, line4, line5, timeLabel )
def updateDisplay( plotlist ):
Ca = moose.vec( '/model/chem/compt0/Ca' )
Ca_input = moose.vec( '/model/chem/compt0/Ca_input' )
plotlist[5].set_ydata( Ca.conc )
plotlist[6].set_ydata( Ca_input.conc )
Ca = moose.vec( '/model/chem/compt1/Ca' )
plotlist[7].set_ydata( Ca.conc )
Ca = moose.vec( '/model/chem/compt2/Ca' )
Ca_input = moose.vec( '/model/chem/compt2/Ca_input' )
plotlist[8].set_ydata( Ca.conc )
plotlist[9].set_ydata( Ca_input.conc )
plotlist[4].canvas.draw()
def finalizeDisplay( plotlist, cPlotDt ):
for x in moose.wildcardFind( '/model/graphs/#[ISA=Table2]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = plotlist[0].plot( pos, x.vector, label=x.name )
plotlist[4].canvas.draw()
print( "Hit '0' to exit" )
try:
raw_input()
except NameError as e: #python3
input( )
def makeChemModel( compt, doInput ):
"""
    This function sets up a simple chemical system in which Ca input
comes to the dend and to selected PSDs. There is diffusion between
PSD and spine head, and between dend and spine head.
:: Ca_input ------> Ca // in dend and spine head only.
"""
# create molecules and reactions
Ca = moose.Pool( compt.path + '/Ca' )
Ca.concInit = 0.08*1e-3
Ca.diffConst = diffConst
if doInput:
Ca_input = moose.BufPool( compt.path + '/Ca_input' )
Ca_input.concInit = 0.08*1e-3
Ca_input.diffConst = diffConst
rInput = moose.Reac( compt.path + '/rInput' )
moose.connect( rInput, 'sub', Ca, 'reac' )
moose.connect( rInput, 'prd', Ca_input, 'reac' )
rInput.Kf = 100 # 1/sec
rInput.Kb = 100 # 1/sec
else:
Ca_sink = moose.BufPool( compt.path + '/Ca_sink' )
Ca_sink.concInit = 0.08*1e-3
rSink = moose.Reac( compt.path + '/rSink' )
moose.connect( rSink, 'sub', Ca, 'reac' )
moose.connect( rSink, 'prd', Ca_sink, 'reac' )
rSink.Kf = 10 # 1/sec
rSink.Kb = 10 # 1/sec
def main():
"""
This example illustrates and tests diffusion embedded in
the branching pseudo-1-dimensional geometry of a neuron.
An input pattern of Ca stimulus is applied in a periodic manner both
on the dendrite and on the PSDs of the 13 spines. The Ca levels in
each of the dend, the spine head, and the spine PSD are monitored.
Since the same molecule name is used for Ca in the three compartments,
    these are automagically connected up for diffusion. The simulation
shows the outcome of this diffusion.
This example uses an external electrical model file with basal
dendrite and three branches on
the apical dendrite. One of those branches has the 13 spines.
The model is set up to run using the Ksolve for integration and the
Dsolve for handling diffusion.
The timesteps here are not the defaults. It turns out that the
chem reactions and diffusion in this example are sufficiently fast
that the chemDt has to be smaller than default. Note that this example
uses rates quite close to those used in production models.
The display has four parts:
a. animated line plot of concentration against main compartment#.
b. animated line plot of concentration against spine compartment#.
c. animated line plot of concentration against psd compartment#.
d. time-series plot that appears after the simulation has
ended.
"""
makeModel()
plotlist = makeDisplay()
# Schedule the whole lot - autoscheduling already does this.
for i in range( 11, 17 ):
moose.setClock( i, chemdt ) # for the chem objects
moose.setClock( 10, diffdt ) # for the diffusion
moose.setClock( 18, plotdt ) # for the output tables.
moose.reinit()
t1 = time.time( )
for i in numpy.arange( 0, runtime, animationdt ):
moose.start( animationdt )
plotlist[10].set_text( "time = %d" % i )
updateDisplay( plotlist )
print( 'Total time taken %g' % ( time.time() - t1 ) )
finalizeDisplay( plotlist, plotdt )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
BhallaLab/moose-examples
|
snippets/diffSpinyNeuron.py
|
Python
|
gpl-2.0
| 10,880
|
[
"MOOSE",
"NEURON"
] |
2b5117f4a3f0f22c1676a952932f5d3109da0b9e40def856b1e129b1a8113297
|
#!/usr/bin/env python
###############################################################################
# This script intends to create a model of disk and envelope (Keto&Zhang 2010)#
# And would produce SPARX compatible HDF and VTK file used for visualization #
###############################################################################
from tables import *
from numpy import *
from math import *
from scipy import optimize
disk=1
env=1
writevtk=1
# unit converter
Msun2MKS=(6.022E23/0.0028)*0.19889E+31/(100*0.14960E+12)**3
Au2MKS=100*0.14960E+12/(100*365.25*24*60*60)
Au2pc=100./206260
prho2tem=1E-3*1.3174*1.6611295681063124e-24*(Au2MKS**2)/1.3806488E-23
km2m=1e3
pc2m=30.857e15
pc2km=30.857e12
mean_molecular_mass=2*1.67*1.67e-27 # kg
# resolution and domain size
Rc_out = 0.05 #pc
Rc_in = 0.0
#Rc_in = 26.*0.0046491/206260 #pc
Z_max=0.05 #pc
stretch_ratioRc=1.02
stretch_ratioZ=1.02
nr=64
nz=128
np=1
# Physical parameter
#parameters from Keto&Zhang
rho_e0=7.9e4 *1e6 # Envelope density at Rd (m^-3)
Rd_int=6900 # (AU)
Ap=5.1
Mt=10.7 # stellar mass (Msun)
Rt=26.*0.0046491/206260
p=-1
BT=15.0
vk= 1.2 # Keplerian velocity (km/s)
Rd=float(Rd_int)/206260. # Centrifugal Radius (pc)
rho_d0=Ap*rho_e0
H0=0.01*Rt
Tt=30000.
Mar = rho_e0*4.*pi*Rd*Rd*vk*(mean_molecular_mass*pc2m**2*km2m) # mass accretion rate (kg/s)
G=4.302e-3 # gravitational constant (pc Msun^-1 (km/s)^2)
sigma=5.67037321e-8 # Stefan-Boltzmann constant (W m^-2 K^-4)
X_mol=3e-8
X_mol2=3e-10
V_t=3000.
#kapp_d="powerlaw, 1.199e+12, 1.000e+04, 1.500e+00"
kapp_d="table,jena_bare_e6"
T_cmb=2.73
gas_to_dust=100.0
molec=""
geom='cyl3d'
root="/"
if (disk==1 and env==1):
vtkfilea='2Denv_disk_Rd'+str(Rd_int)+'.vtk'
vtkfileb='2Denv_disk_vel_Rd'+str(Rd_int)+'.vtk'
elif(disk==0 and env==1):
vtkfilea='2Denv_Rd'+str(Rd_int)+'.vtk'
vtkfileb='2Denv_vel_Rd'+str(Rd_int)+'.vtk'
elif(disk==1 and env==0):
vtkfilea='2Ddisk_Rd'+str(Rd_int)+'.vtk'
vtkfileb='2Ddisk_vel_Rd'+str(Rd_int)+'.vtk'
def CubicEq(xx):
global pp,qq
return xx*xx*xx+pp*xx+qq
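# Editorial note: CubicEq encodes x**3 + pp*x + qq = 0; with pp = R/Rd - 1 and
# qq = -cos(theta)*R/Rd (set in writegrid below) this appears to be the usual
# rotating-infall (Ulrich-type) streamline relation, solved for cos(theta0)
# with scipy.optimize.brentq.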
# Define a user record to characterize some kind of particles
class Particle(IsDescription):
LEVEL=Int32Col(pos=0)
POS=Int64Col(pos=1)
geom=StringCol(itemsize=6,pos=2)
X_max=Float64Col(shape=3,pos=3)
X_min=Float64Col(shape=3,pos=4)
X_cen=Float64Col(shape=3,pos=5)
n_H2=Float64Col(pos=6)
T_k=Float64Col(pos=7)
X_mol=Float64Col(pos=8)
X_pH2=Float64Col(pos=9)
X_oH2=Float64Col(pos=10)
X_e=Float64Col(pos=11)
X_H=Float64Col(pos=12)
X_He=Float64Col(pos=13)
V_t=Float64Col(pos=14)
V_edge=FloatCol(shape=(6,3),pos=15)
V_cen=FloatCol(shape=3,pos=16)
B_cen=FloatCol(shape=3,pos=17)
ds=FloatCol(pos=18)
NCHILDREN=Int64Col(pos=19)
NAXES=Int64Col(shape=3,pos=20)
T_d=Float64Col(pos=21)
kapp_d=StringCol(itemsize=64,pos=22)
T_ff=Float64Col(pos=23)
kapp_ff=StringCol(itemsize=64,pos=24)
T_bb=Float64Col(pos=25)
def writezone(direc,lev,position,xmax,xmin,naxes):
# Create ZONE table
table = h5file.createTable(direc, 'ZONE', Particle, "Grid table")
particle = table.row
particle['LEVEL'] = lev
particle['POS'] = position
particle['geom'] = geom
particle['X_max'] =[ xmax[0],xmax[1],xmax[2] ]
particle['X_min'] =[ xmin[0],xmin[1],xmin[2] ]
particle['X_cen'] =[ 0.5*(xmin[0]+xmax[0]),0.5*(xmin[1]+xmax[1]),0.5*(xmin[2]+xmax[2]) ]
particle['NCHILDREN'] =naxes[0]*naxes[1]*naxes[2]
particle['NAXES'] =naxes
#Insert a new particle record
particle.append()
table.flush()
del table.attrs.FIELD_0_FILL
del table.attrs.FIELD_1_FILL
del table.attrs.FIELD_2_FILL
del table.attrs.FIELD_3_FILL
del table.attrs.FIELD_4_FILL
del table.attrs.FIELD_5_FILL
del table.attrs.FIELD_6_FILL
del table.attrs.FIELD_7_FILL
del table.attrs.FIELD_8_FILL
del table.attrs.FIELD_9_FILL
del table.attrs.FIELD_10_FILL
del table.attrs.FIELD_11_FILL
del table.attrs.FIELD_12_FILL
del table.attrs.FIELD_13_FILL
del table.attrs.FIELD_14_FILL
del table.attrs.FIELD_15_FILL
del table.attrs.FIELD_16_FILL
del table.attrs.FIELD_17_FILL
del table.attrs.FIELD_18_FILL
del table.attrs.FIELD_19_FILL
del table.attrs.FIELD_20_FILL
del table.attrs.FIELD_21_FILL
del table.attrs.FIELD_22_FILL
del table.attrs.FIELD_23_FILL
del table.attrs.FIELD_24_FILL
del table.attrs.FIELD_25_FILL
del table.attrs.NROWS
def writegrid(direc,lev,naxes,Raxis,Paxis,Zaxis,nc):
global pp,qq
# Create GRID table
table = h5file.createTable(direc, 'GRID', Particle, "Grid table")
particle = table.row
density=zeros(naxes)
temperature=zeros(naxes)
Vx=zeros(naxes)
Vy=zeros(naxes)
Vz=zeros(naxes)
mass_env=0.
mass_disc=0.
total_volume = 0.
for i in range(naxes[0]):
for j in range(naxes[1]):
for k in range(naxes[2]):
# write a row of grid table
particle['LEVEL'] = lev+1
particle['POS'] = ( naxes[1] * i + j ) * naxes[2] + k
particle['geom'] = geom
particle['X_max'] =[ Raxis[i+1],Paxis[j+1],Zaxis[k+1] ]
particle['X_min'] =[ Raxis[i] ,Paxis[j] ,Zaxis[k] ]
particle['X_cen'] = [ 0.5*(Raxis[i]+Raxis[i+1]),
0.5*(Paxis[j]+Paxis[j+1]),
0.5*(Zaxis[k]+Zaxis[k+1]) ]
# write out the non-empty-leaf zone
Rc = particle['X_cen'][0]
phi = particle['X_cen'][1]
Z = particle['X_cen'][2]
R = sqrt( Rc * Rc + Z * Z)
theta = acos( Z / R )
pp = R/Rd-1.
qq = -cos(theta)*R/Rd
cos_theta0 = optimize.brentq(CubicEq, -1.,1.)
if (cos_theta0 > 1. or cos_theta0 < -1.):
print cos_theta0,pp,qq
volume = 0.5 * ( Raxis[i+1]**2 - Raxis[i]**2 ) * ( Paxis[j+1] - Paxis[j] ) * ( Zaxis[k+1] - Zaxis[k] )
total_volume = total_volume + volume
if(env==1):
density_env = rho_e0 * ((R/Rd)**(-1.5)) * ((1+cos(theta)/cos_theta0)**(-0.5)) * (1 + ((R/Rd)**(-1)) * (3*cos_theta0**2-1.0) )**(-1)
mass_env += density_env*volume
else:
density_env = 0.0
if (R<=Rd and disk==1):
rho_0 = rho_d0*(Rd/Rc)**2.25
H=H0*(Rc/Rt)**1.25
density_disc = rho_0*exp(-(R*R-Rc*Rc)/(2.*H*H))
mass_disc += density_disc*volume
else:
density_disc =0.
density[i,j,k] = density_env + density_disc
Vkep = sqrt(G*Mt/R)
Vp_disc = sqrt(G*Mt/Rc)
Vr_env = -Vkep*sqrt(1.+cos(theta)/cos_theta0)
Vt_env = Vkep*((cos_theta0-cos(theta))/sin(theta))*sqrt(1.+cos(theta)/cos_theta0)
Vp_env = Vkep*(sqrt(1.-cos_theta0*cos_theta0)/sin(theta))*sqrt(1.+cos(theta)/cos_theta0)
if(density[i,j,k]!=0.0):
Vr = (density_env*Vr_env)/density[i,j,k]
Vt = (density_env*Vt_env)/density[i,j,k]
Vp = (density_env*Vp_env+density_disc*Vp_disc)/density[i,j,k]
T_env = Tt*(Rt/(2.*R))**(2./(4+p))
T_disc = BT * ( (3.*G*Mt*Mar/(4.*pi*pc2km*pc2km*Rc*Rc*Rc*sigma)) * (1.-sqrt(Rt/Rc)) )**0.25
temperature[i,j,k]=(density_disc*T_disc+density_env*T_env)/density[i,j,k]
else:
Vr = 0.0
Vt = 0.0
Vp = 0.0
temperature[i,j,k] = 0.0
if(writevtk):
Vx[i,j,k] = cos(phi)*sin(theta)*Vr + cos(phi)*cos(theta)*Vt -sin(phi)*Vp
Vy[i,j,k] = sin(phi)*sin(theta)*Vr + sin(phi)*cos(theta)*Vt +cos(phi)*Vp
Vz[i,j,k] = cos(theta)*Vr - sin(theta)*Vt
particle['n_H2'] = density[i,j,k]
particle['V_cen'] = [
km2m * ( Vr*sin(theta) + Vt*cos(theta) ),
km2m * Vp,
km2m * ( Vr*cos(theta) - Vt*sin(theta) )
]
particle['T_k'] = temperature[i,j,k]
if ( temperature[i,j,k] >= 90.0 ):
particle['X_mol'] = X_mol
else:
particle['X_mol'] = X_mol2
#particle['X_mol'] = X_mol
particle['V_t'] = V_t
particle['T_d'] = particle['T_k']
particle['kapp_d'] = kapp_d
nc=nc+1
# Insert a new particle record
particle.append()
mass_env=mass_env*pc2m**3*mean_molecular_mass/0.19889E+31
mass_disc=mass_disc*pc2m**3*mean_molecular_mass/0.19889E+31
print 'Total envelope mass =',mass_env,'(solar mass)'
print 'Total disc mass =',mass_disc,'(solar mass)'
print 'Total mass =',mass_env+mass_disc,'(solar mass)'
print 'Total volume =',total_volume,'(pc^3)'
if (writevtk):
fvtk1=open(vtkfilea, mode = "w")
print >>fvtk1,'# vtk DataFile Version 3.0'
print >>fvtk1,'ENV_DISK'
print >>fvtk1,'ASCII'
print >>fvtk1,'DATASET STRUCTURED_GRID'
print >>fvtk1,'DIMENSIONS %(0)d %(1)d %(2)d'%{'0':nr+1,'1':np+1,'2':nz+1}
print >>fvtk1,'POINTS %(0)d float'%{'0':(nr+1)*(nz+1)*(np+1)}
for k in range(nz+1):
for j in range(np+1):
for i in range(nr+1):
if(j==0):
print >>fvtk1,'%(0)e %(1)d %(2)e'%{'0':Rc_p[i],'1':0,'2':Z_p[k]}
elif(j==1):
print >>fvtk1,'%(0)e %(1)e %(2)e'%{'0':Rc_p[i],'1':1e-6,'2':Z_p[k]}
print >>fvtk1,'CELL_DATA %(0)d'%{'0':naxes[0]*naxes[1]*naxes[2]}
print >>fvtk1,'SCALARS density float 1'
print >>fvtk1,'LOOKUP_TABLE default'
for k in range(naxes[2]):
for j in range(naxes[1]):
for i in range(naxes[0]):
print >>fvtk1,'%(0)e'%{'0':density[i,j,k]},
print >>fvtk1,'SCALARS temperature float 1'
print >>fvtk1,'LOOKUP_TABLE default'
for k in range(naxes[2]):
for j in range(naxes[1]):
for i in range(naxes[0]):
print >>fvtk1,'%(0)e'%{'0':temperature[i,j,k]},
fvtk1.close()
fvtk2=open(vtkfileb, mode = "w")
print >>fvtk2,'# vtk DataFile Version 3.0'
print >>fvtk2,'ENV_DISK'
print >>fvtk2,'ASCII'
print >>fvtk2,'DATASET STRUCTURED_GRID'
print >>fvtk2,'DIMENSIONS %(0)d %(1)d %(2)d'%{'0':nr,'1':nz,'2':np}
print >>fvtk2,'POINTS %(0)d float'%{'0':nr*nz*np}
for j in range(nz):
for i in range(nr):
print >>fvtk2,'%(0)e %(1)d %(2)e'%{'0':0.5*(Rc_p[i]+Rc_p[i+1]),'1':0,'2':Z_p[j]}
print >>fvtk2,'POINT_DATA %(0)d'%{'0':naxes[0]*naxes[1]*naxes[2]}
print >>fvtk2,'VECTORS velocity float'
for k in range(naxes[2]):
for j in range(naxes[1]):
for i in range(naxes[0]):
print >>fvtk2,'%(0)e %(1)e %(2)e'%{'0':-Vx[i,j,k],'1':-Vy[i,j,k],'2':Vz[i,j,k]}
fvtk2.close()
table.flush()
del table.attrs.FIELD_0_FILL
del table.attrs.FIELD_1_FILL
del table.attrs.FIELD_2_FILL
del table.attrs.FIELD_3_FILL
del table.attrs.FIELD_4_FILL
del table.attrs.FIELD_5_FILL
del table.attrs.FIELD_6_FILL
del table.attrs.FIELD_7_FILL
del table.attrs.FIELD_8_FILL
del table.attrs.FIELD_9_FILL
del table.attrs.FIELD_10_FILL
del table.attrs.FIELD_11_FILL
del table.attrs.FIELD_12_FILL
del table.attrs.FIELD_13_FILL
del table.attrs.FIELD_14_FILL
del table.attrs.FIELD_15_FILL
del table.attrs.FIELD_16_FILL
del table.attrs.FIELD_17_FILL
del table.attrs.FIELD_18_FILL
del table.attrs.FIELD_19_FILL
del table.attrs.FIELD_20_FILL
del table.attrs.FIELD_21_FILL
del table.attrs.FIELD_22_FILL
del table.attrs.FIELD_23_FILL
del table.attrs.FIELD_24_FILL
del table.attrs.FIELD_25_FILL
del table.attrs.NROWS
return nc
#### GRID GENERATION ####
# Coordinate : RADIUS points (stretching)
r0 = (Rc_out-Rc_in)*(stretch_ratioRc-1.)/(stretch_ratioRc**(nr)-1.)
Rc_p = zeros(nr+1)
for i in range(nr+1):
if (i==0):
Rc_p[i] = Rc_in
dRc = r0
else:
Rc_p[i] = Rc_p[i-1]+dRc
dRc = dRc*stretch_ratioRc
# Coordinate : PHI points (phi symmetric, np=1)
phi_p = zeros(np+1)
phi_p[0]=0.0
phi_p[np]=2.0*pi
# Coordinate : Z points (stretching)
Z0 = Z_max*(stretch_ratioZ-1.)/(stretch_ratioZ**(nz/2)-1.)
Z_p = zeros(nz+1)
for j in range(nz+1):
if (j==0):
Z_p[j] = -Z_max
dz=Z0*stretch_ratioZ**(nz/2-1)
elif (j<nz/2):
Z_p[j]=Z_p[j-1]+dz
dz=dz/stretch_ratioZ
elif (j==nz/2):
Z_p[j] = 0.0
dz=Z0
elif (j>nz/2):
Z_p[j] = Z_p[j-1]+dz
dz=dz*stretch_ratioZ
elif (j==nz):
Z_p[j] = Z_max
########################
filename = "model_env_disk"
h5file = openFile(filename, mode = "w", title = "Test file")
h5file.delNodeAttr("/", "TITLE", name=None)
h5file.delNodeAttr("/", "CLASS", name=None)
h5file.delNodeAttr("/", "VERSION", name=None)
h5file.delNodeAttr("/", "PYTABLES_FORMAT_VERSION", name=None)
h5file.setNodeAttr("/", "molec", molec, name=None)
h5file.setNodeAttr("/", "T_cmb", T_cmb, name=None)
h5file.setNodeAttr("/", "gas_to_dust", gas_to_dust, name=None)
h5file.setNodeAttr("/", "velfield", "grid ", name=None)
writezone(root,-1,0, [Rc_p[nr],phi_p[np],Z_p[nz]], [Rc_p[0],phi_p[0],Z_p[0]], [nr,np,nz])
ncell=writegrid(root,-1, [nr,np,nz], Rc_p, phi_p, Z_p, 0)
h5file.close()
print 'Total cells=',ncell
if (writevtk):
print "Wrote out",vtkfilea,vtkfileb
|
itahsieh/sparx-alpha
|
preprocessor/script/CYL2D_preprocessor.py
|
Python
|
gpl-3.0
| 12,902
|
[
"VTK"
] |
bd03dcbf498da9c5c0a9f4d055e98563ff3955a60d1ee463b3c6f378249599ac
|
#!/usr/bin/env python
# Kyle Hernandez
# Although not required in any sense, share the love and pass on attribution
# when using or modifying this code.
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along with
# this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>
#
import sys
import time
def main():
"""
---------------------------------------------------------------------------
AUTHOR: Kyle Hernandez
EMAIL: [email protected]
Takes the best hits of FH on Foxtail and extracts those locations from the
Foxtail genome. Creates a fasta file of these sites for blasting.
---------------------------------------------------------------------------
USAGE: ExtractFox.py inbest.tab infasta.fa outfasta.fa
ARGUMENTS:
inbest.tab - Tab delimited output from Blast Parser
infasta.fa - The Foxtail reference
outfasta.fa - The output file for the cut Foxtail reference.
"""
cut_dict = get_cut_sites()
process_reference(cut_dict)
def get_cut_sites():
"""
    Reads the blast output and returns a dict of 0-based start/stop slice positions
"""
dic = {}
with open(inbest, 'rU') as f:
for line in f:
cols = line.rstrip().split('\t')
ch = cols[1]
p1 = min([int(cols[8]), int(cols[9])])
p2 = max([int(cols[8]), int(cols[9])])
if p1 - 1 < 0: start = 0
else: start = p1 - 1
val = (start, p2)
if ch not in dic: dic[ch] = []
dic[ch].append(val)
return dic
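# Illustrative shape of the dict returned by get_cut_sites (ids and coordinates
# are made up): {'scaffold_1': [(0, 150), (299, 480)], 'scaffold_2': [(1049, 1200)]}
# where keys are subject sequence ids (column 2 of the blast table) and values
# are (0-based start, end) slice coordinates into that sequence.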
def process_reference(dic):
"""
Reads in the Foxtail fasta reference file and cuts at given positions.
"""
print 'Processing reference...'
curr_scaff = ''
curr_seq = []
o = open(outfasta, 'wb')
with open(infasta, 'rU') as f:
for line in f:
if line.startswith('>') and not curr_scaff:
curr_scaff = line.rstrip().split('>')[1]
elif line.startswith('>') and curr_scaff:
seq = ''.join(curr_seq)
cuts = process_scaff(curr_scaff, dic[curr_scaff], seq)
[print_cuts(j, curr_scaff, o) for j in cuts]
curr_scaff = line.rstrip().split('>')[1]
curr_seq = []
else:
curr_seq.append(line.rstrip())
seq = ''.join(curr_seq)
cuts = process_scaff(curr_scaff, dic[curr_scaff], seq)
[print_cuts(j, curr_scaff, o) for j in cuts]
o.close()
def process_scaff(scf, pos, seq):
"""
Returns cut sites.
"""
for i in pos:
if not seq[i[0]:i[1]]:
print scf, i, len(seq)
return [(i, seq[i[0]:i[1]]) for i in pos if seq[i[0]:i[1]]]
def print_cuts(c, s, o):
"""
Write cut reference to file
"""
o.write('>' + str(s) + '_' + str(c[0][0]) + '_' + str(c[0][1]) + '\n')
o.write(c[1] + '\n')
if __name__ == '__main__':
start = time.time()
if len(sys.argv) != 4:
print main.__doc__
sys.exit()
inbest = sys.argv[1]
infasta = sys.argv[2]
outfasta = sys.argv[3]
main()
print "Finished; Took:", time.time() - start, "seconds."
|
kmhernan/Publications
|
Lowry-2013-FxH-Map/python/SyntenyScripts/extract_fox.py
|
Python
|
unlicense
| 3,469
|
[
"BLAST"
] |
ba84fd3a832ff78cb539d22ddefc85f320786fdf25474753e9e72070aa1fc543
|
import genepy
import numpy as np
import os
from Bio import SeqRecord
# NEXT TIME :
# Two errors to deal with :
# - Err.: one or more missing sequences in block 2
# --- solutions - Guindon ?
# --- solutions - read .phy generated by ClustalO, and rewrite it using BioPython
# - Duplicate names in PHYLIP files due to truncation. Way around ?
# .remove() - remove some sequences from the array
# Base sequence list class
class seqarray :
"""GenePy Sequence Array object.
For documentation, see http://github.io/QCaudron/genepy
"""
def __init__(self, source) :
"""Constructor.
Argument : a filename or a list of strings that represent sequences.
- mysequences = genepy.seqarray("from_genbank.gb") -- loads the sequences
in from_genbank.gb as BioPython Bio.Seq objects.
- mysequences = genepy.seqarray(seq_list), where seq_list is a list of
strings ( such as ["ACTG", "AGTA", "TTGC"] ) converts these to BioPython
Bio.Seq objects using the generic_dna alphabet ( for now ).
"""
# If we're reading in a sequence set from a file
if type(source) is str :
if os.path.isfile(source) :
self.seq = genepy.readalignment(source)
self.filename = source
else :
print "%s not found, aborting." % source
# If we're fed a list
elif type(source) is list :
self.seq = [SeqRecord.SeqRecord(s) for s in source]
self.filename = "genepy.fasta"
else :
raise TypeError("Expected a filename or a list of strings.")
# Generate static members
self.update()
def __str__(self) :
"""Long string representation of a genepy.seqarray object."""
out = self.__repr__()
out += ("-- C+G content : %.03f\n" % (self.statistics["C"].mean() + self.statistics["G"].mean()))
out += ("-- From file : %s" % self.filename.split("/")[-1])
return out
def __repr__(self) :
"""Short string representation of a genepy.seqarray object."""
summary = "GenePy sequence array (genepy.seqarray) :\n"
summary += "-- Sequences : %d\n" % self.len
summary += "-- Mean length : %.01f (min %d, max %d)\n" % \
(np.array(self.seq_len).mean(), np.min(self.seq_len), np.max(self.seq_len))
return summary
def __iter__(self) :
"""Iterator function."""
self.it = 0
return self
def next(self) :
"""Next object in iteration."""
if self.it == self.len :
raise StopIteration
else :
self.it += 1
return self.seq[self.it - 1]
def update(self) :
"""Updates the member variables of a genepy.seqarray object.
This function is called whenever sequences are aligned or trimmed.
Any changes made directly to genepy.seqarray variables ( such as to the
sequence list, genepy.seqarray.seq ), will not be reflected in other
member variables ( such as genepy.seqarray.len ) until this function
is called. In general, as long as the user calls genepy.seqarray methods
only, and no changes are otherwise made to the object, this method does
not need to be used.
"""
# Number of sequences
self.len = len(self.seq)
# Sequence lengths
self.seq_len = np.array([len(s.seq) for s in self.seq])
# Alignment numerical array
l = self.seq_len.max() if type(self.seq_len) == np.ndarray else self.seq_len
self.array = genepy.alignmentarray(self.seq, length = l)
# Statistics
self.statistics = genepy.calcstats(self.seq)
# Show sequences
def show(self) :
"""Display the sequences visually as a matplotlib.pyplot.imshow()
Colours :
-- A : dark green
-- C : dark red
-- G : orange
-- T : light green
-- unknown / empty : black
Cytosine and guanine are represented by "warm" colours; adenine and
thymine are shown in "cold" colours.
"""
genepy.showalignment(self.array)
# Align sequences
def align(self, force = True, it = False, full = False, full_iter = False, auto = True, threads = False) :
"""Align the array of sequences using ClustalO.
-- force : True / False; overwrite filename, if it exists
-- it : False, integers > 0; iterate the guide tree
-- full : True / False; use full distance matrix for guide-tree calculation
-- full_iter : True / False; use full distance matrix during iteration only
-- auto : True / False; automatically select options for speed and accuracy
-- threads : False, integers > 0; limit the number of threads; False uses all
"""
# System call to ClustalO
genepy.align(self.filename, force, threads, full, full_iter, it, auto)
# Read aligned sequence array
self.seq = genepy.readalignment(os.path.splitext(self.filename)[0] + "_aligned_genepy.phy")
# Update static members
self.update()
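        # Hedged usage sketch (argument values are illustrative, not defaults
        # taken from this module): mysequences.align(force=True, threads=4)
        # re-aligns with ClustalO and refreshes the derived members via update().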
def phylotree(self, nucleotide_frequency = "empirical", bootstrap = -4, search_algorithm = "BEST") :
"""Construct a phylogenetic tree using PhyML.
-- nucleotide_frequency : "empirical" or "max_likelihood"
-- bootstrap : -4 for SH-like branch supports only; -2 for Chi^2;
-1 for approximate likelihood ratio; 0 for no bootstrapping,
integers > 0 for the number of bootstraps to perform, will try to use MPI
-- search_algorithm : "NNI" for nearest-neighbour interchange; "SPR" for subtree
pruning and regrafting; "BEST" for best of both
"""
if not os.path.isfile(os.path.splitext(self.filename)[0] + "_aligned_genepy.phy") :
print "GenePy can't find an aligned sequence file for %s.\nTry calling .align()." % \
self.filename.split("/")[-1]
return
genepy.phylotree(self.filename, nucleotide_frequency, bootstrap, search_algorithm)
def stats(self) :
"""Display sequence array statistics."""
# Display statistics
genepy.stats(self.statistics)
def trimalignment(self, array = None, left = None, right = None) :
"""Trim the sequence array by a given number of nucleotides from left and right.
left, right : like performing mysequences.seq = mysequences.seq[left:right]
"""
self.seq = genepy.trimalignment(self.seq, array, left, right)
self.update()
def dropempties(self, fraction = 0.5) :
"""Remove any sequence containing less than a fraction of known nucleotides.
fraction : between 0 and 1.
Useful after trimming to a given region of the genome."""
self.seq = genepy.dropempties(self.seq, fraction)
self.update()
|
QCaudron/genepy
|
genepy/seqarray.py
|
Python
|
mit
| 6,327
|
[
"Biopython"
] |
5a08932a7d8506a509633804f9fbe5deb4d85dbb9707960c228a8b4962f92b87
|
#!/usr/bin/python
# -*- coding: utf8 -*-
import numpy as np
import math
import time
from scipy.ndimage import correlate1d
def convol1d(array,kernel,scale_factor=None):
"""
The convol1d function convolves an array with a kernel 1D,
and returns the result. Convolution is a general process
that can be used for various types of smoothing, signal
processing, shifting, differentiation, edge detection, etc.
"""
row = array.shape[0]
column = array.shape[1]
R = np.zeros([row,column])
m = len(kernel)
    if scale_factor is None:
        r = correlate1d(array,kernel)
        R[:,int(m/2):column-int(math.ceil(m/2.))+1] = r[:,int(m/2):column-int(math.ceil(m/2.))+1]
    else:
        # scale (normalize) the kernel before convolving
        kernel = kernel/float(scale_factor)
        r = correlate1d(array,kernel)
        R[:,int(m/2):column-int(math.ceil(m/2.))+1] = r[:,int(m/2):column-int(math.ceil(m/2.))+1]
return R
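# Minimal usage sketch for convol1d (values are illustrative, not from the
# original module):
#   a = np.arange(12, dtype=float).reshape(3, 4)
#   smoothed = convol1d(a, np.array([1., 2., 1.]), scale_factor=4.)
# only the interior columns where the kernel fits entirely are written; the
# edge columns of the result stay zero.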
def sconvol1d(arreglo,kernel=None,scale_factor=1.,fwhm=None,std=None):
"""
This program will smooth a 2D array, including the edges,
with one-dimensional kernels. Problems of this kind arise when,
e.g. an array is to be convolved with a 2D symmetric
gaussian, which is separable into two one-dimensional
convolutions.
"""
#~ s=len(arreglo.shape)
dims = np.ndim(arreglo)
rows = arreglo.shape[0]
collumns = arreglo.shape[1]
if dims != 2:
raise ValueError('Array must be 2-dimensional')
    if kernel is None:
        if fwhm is None and std is None:
            raise ValueError('Convolve with what?')
        if fwhm is not None:
            # convert FWHM to standard deviation, then build the kernel below
            std = fwhm/(2.*math.sqrt(2.*math.log(2.)))
        # build a normalized 1D Gaussian kernel of odd width from std
        width = int(std*9.)
        if width%2 == 0:
            width += 1
        kernel = np.arange(float(width))-width/2
        kernel = np.exp(-(kernel*kernel)/(2.*std*std))
        kernel = kernel/(std*math.sqrt(2.*math.pi))
    else:
        width = len(kernel)
        if width%2 == 0:
            raise ValueError('Dimension of kernel must be odd')
big=np.empty([arreglo.shape[0]+width-1,arreglo.shape[1]+width-1])
edge=int(width/2)
big[edge:big.shape[0]-edge,edge:big.shape[1]-edge]=arreglo
for i in range(0,edge):
big[edge:big.shape[0]-edge,i]=arreglo[:,edge-1-i]
big[edge:big.shape[0]-edge,arreglo.shape[1]+edge+i]=arreglo[:,arreglo.shape[1]-1-i]
#~ big=convol1d(big,kernel,scale_factor)
big = correlate1d(big,(kernel/scale_factor),mode="constant",cval=np.nan)
big[np.isnan(big)]=0.0
big=np.rot90(big,-1)
for i in range(0,edge):
big[:,i] = big[:,2*edge-1-i]
big[:,arreglo.shape[0]+edge+i] = big[:,arreglo.shape[0]+edge-1-i]
#~ big=convol1d(big,kernel,scale_factor)
big = correlate1d(big,(kernel/scale_factor),mode="constant",cval=np.nan)
big[np.isnan(big)]=0.0
big=np.rot90(big,-3)
big=big[edge:arreglo.shape[0]+edge,edge:arreglo.shape[1]+edge]
return big
#~ start = time.time()
#~ a=np.linspace(0,math.pi,30).reshape([5,6])
#~ kernel = np.array([1,2,3,2,1])
#~ print sconvol1d(a,std=0.10616525)
#~ print (time.time() - start), " seconds"
|
Hypnus1803/FlowMapsGUI
|
GUI/MainCodes/convol.py
|
Python
|
bsd-2-clause
| 2,872
|
[
"Gaussian"
] |
63f485b738d21954df4a59c3c469d47ac9c7d9c513566ae50d68e7393ed4ef3c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.cm
import snn_utils.plotter as plotter
import snn_utils.plotter.backends.mpl as mpl_plotter
import snn_utils.plotter.interface as interface
import snn_utils.plotter.plots as plots
from config import *
from snn_utils.comm.serializer import SERIALIZERS
from snn_utils.plotter import data_provider
def split_list(l, n_chunks):
n_elements_per_chunk = len(l) // n_chunks
for i in range(0, len(l), n_elements_per_chunk):
yield l[i:i + n_elements_per_chunk]
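# Illustrative example for split_list (values are made up):
#   list(split_list(list(range(10)), 2)) -> [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
# when len(l) is not divisible by n_chunks, the remainder spills into an extra,
# shorter chunk.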
def configure_detail_plot(data_source):
p = plotter.PlotWindowBuilder(auto_vertical_padding=0.2)
c1 = p.add_column(x_spines=True)
pattern_colors = list(matplotlib.cm.plasma(np.linspace(0.2, 0.8, n_patterns)))
c1.add_plot(plots.PhasePlot(data_source, 'pattern_id', n_patterns, label="cue", y_pos=0,
common_line_style={'linewidth': 5},
colors=pattern_colors, zero_is_value=False), height_ratio=0.5
)
c1.add_plot(plots.SpikeTrainPlot(data_source,
[('pattern_in', i) for i in range(n_input_neurons)],
label='input activity (#neurons = {})'.format(n_input_neurons),
colors='black'),
height_ratio=1.5)
n_out = n_up_neurons + n_down_neurons
c1.add_row_group([plots.SpikeTrainPlot(data_source, subset, colors=pattern_colors[i])
for i, subset in enumerate(split_list([('activity_in', i) for i in range(n_out)],
n_patterns))],
vertical_padding_ratio=0.1, height_ratio=[0.5] * n_patterns, hlines=True,
label="Up/Down neuron activity".format(n_out))
c1.add_plot(plots.AnalogSignalPlot(data_source,
['activity_rate_{}'.format(i) for i in range(n_patterns)],
label='lever position', colors=pattern_colors,
y_ticks=[], y_lim=(-100.0, 100.0 )))
c1.add_plot(plots.AnalogSignalPlot(data_source,
['curr', 'mean'], 'reward', ['current', 'average'],
y_lim=(0.0, 1.0), y_ticks=[]))
return p
if __name__ == '__main__':
master = interface.Master()
# data sources
data_source = data_provider.ProxyDataSource(sender=False)
# subscriptions
comm = master.communicator()
comm.add_subscriber(music_zmq_proxy_config['communication']['host'],
music_zmq_proxy_config['communication']['port'],
lambda delta: data_source.read_delta(delta),
deserialize=SERIALIZERS[music_zmq_proxy_config['communication']['format']].deserialize)
mpl_plotter.configure_matplotlib()
window = mpl_plotter.MatplotlibWindow(configure_detail_plot(data_source),
data_source=data_source,
max_time_window=plotter_node_time_window)
master.scheduler().add_handle(lambda: window.draw(), 0.3)
master.scheduler().add_handle(
lambda: data_source.truncate(lower=data_source.get_max_time() - plotter_node_time_window)
if data_source.get_max_time() is not None else None,
10000)
master.mainloop()
|
IGITUGraz/spore-nest-module
|
examples/lever_press_showcase/python/interface.py
|
Python
|
gpl-2.0
| 3,490
|
[
"NEURON"
] |
dde2c975be811329320285e0436050df60b0bf3ac1f57721d15719fbfbceb67c
|
#
#-*- coding:utf-8 -*-
"""
Gentoo-keys - base.py
Command line interface argparse options module
and common functions
@copyright: 2012-2015 by Brian Dolbec <[email protected]>
@license: GNU GPL2, see COPYING for details.
"""
from __future__ import print_function
import argparse
import os
import sys
import copy
from gkeys.fileops import ensure_dirs
from gkeys.log import log_levels, set_logger
from gkeys.gkey import GKEY
if sys.version_info[0] >= 3:
from urllib.request import urlopen
py_input = input
_unicode = str
else:
from urllib2 import urlopen
py_input = raw_input
_unicode = unicode
if sys.version_info[0] >= 3:
unicode = str
class Args(object):
    '''Basic argparse replacement for using gkeys Actions via an API
Holds the full spectrum of possible options supported.
Not all options used by all actions.'''
def __init__(self):
self.action = None
self.all = False
self.category = None
self.cleankey = False
self.destination = None
self.exact = False
self.filename = None
self.fingerprint = None
self.keyid = None
self.keyring = None
self.keys = None
self.nick = None
self.name = None
self.keydir = None
self.seedfile = None
self.signature = None
self.status = False
self.timestamp = None
self.uid = None
self.fetchonly = None
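# Hedged usage sketch (the action name below and the way the object is handed
# to an Actions method are assumptions, not taken from this module):
#   args = Args()
#   args.action = 'listseed'
#   args.category = 'gentoo'
#   # the populated Args instance can then be passed to the matching Actions call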
class CliBase(object):
    '''Common cli and argparse options class'''
def __init__(self):
self.cli_config = {
'Actions': None,
'Available_Actions': [],
'Action_Map': {},
'Base_Options': [],
'prog': 'gkeys',
'description': 'Gentoo-keys manager program',
'epilog': '''Caution: adding UNTRUSTED keys can be HAZARDOUS to your system!'''
}
self.config = None
self.args = None
self.seeds = None
self.actions = None
self.logger = None
self.version = None
self.need_Action = True
@staticmethod
def _option_status(parser=None):
parser.add_argument('-A', '--status', action='store_true',
default=False,
help='The active status of the member')
@staticmethod
def _option_all(parser=None):
parser.add_argument('-a', '--all', dest='all',
action='store_true', default=False,
help='Match all inputs arguments in searches')
@staticmethod
def _option_category(parser=None):
parser.add_argument('-C', '--category',
dest='category', default=None,
help='The key or seed directory category to use or update')
@staticmethod
def _option_cleankey(parser=None):
parser.add_argument('--clean-key',
dest='cleankey', default=False,
help='Clean the key from the keyring due to failures')
@staticmethod
def _option_cleanseed(parser=None):
parser.add_argument('--clean-seed',
dest='cleanseed', default=False,
help='Clean the seed from the seedfile due to failures. '
'Used during binary keyring release creation.')
@staticmethod
def _option_dest(parser=None):
parser.add_argument('-d', '--dest', dest='destination', default=None,
help='The destination for move, copy, create operations')
@staticmethod
def _option_exact(parser=None):
parser.add_argument('-e', '--exact', dest='exact',
action='store_true', default=False,
help='Use CASE matching in searches')
@staticmethod
def _option_fetchonly(parser=None):
parser.add_argument('--fetchonly',
dest='fetchonly', default=False,
help="Only fetch the seed file if there is an update or doesn't exist locally")
@staticmethod
def _option_file(parser=None):
parser.add_argument('-F', '--file', dest='filename', default=None,
nargs='+',
help='The path/URL to use for the (signed) file')
@staticmethod
def _option_1file(parser=None):
parser.add_argument('-F', '--file', dest='filename', default=None,
help='The path/URL to use for the (signed) file')
@staticmethod
def _option_fingerprint(parser=None):
parser.add_argument('-f', '--fingerprint', dest='fingerprint',
default=None, nargs='+',
help='The fingerprint(s) of the the key or subkey')
@staticmethod
def _option_gpgsearch(parser=None):
parser.add_argument('-g', '--gpgsearch', dest='gpgsearch',
action='store_true', default=False,
help='Do a gpg search operation, rather than a gkey search')
@staticmethod
def _option_homedir(parser=None):
parser.add_argument('-H', '--homedir', dest='homedir', default=None,
help='The destination for the generated key')
@staticmethod
def _option_keyid(parser=None):
parser.add_argument('-i', '--keyid', dest='keyid', default=None,
nargs='+',
help='The long keyid of the gpg key to search for')
@staticmethod
def _option_justdoit(parser=None):
parser.add_argument('--justdoit', dest='justdoit',
action='store_true', default=False,
help='Just Do It')
@staticmethod
def _option_keyring(parser=None):
parser.add_argument('-k', '--keyring', dest='keyring', default=None,
help='The name of the keyring to use for verification, etc.')
@staticmethod
def _option_keys(parser=None):
parser.add_argument('-K', '--keys', dest='keys', nargs='*',
default=None,
help='The fingerprint(s) of the primary keys in the keyring.')
@staticmethod
def _option_mail(parser=None):
parser.add_argument('-m', '--mail', dest='mail', default=None,
help='The email address to search for or use.')
@staticmethod
def _option_nick(parser=None):
parser.add_argument('-n', '--nick', dest='nick', default=None,
            help='The nick associated with the key')
@staticmethod
def _option_name(parser=None):
parser.add_argument('-N', '--name', dest='name', nargs='*',
            default=None, help='The name of the key')
@staticmethod
def _option_1name(parser=None):
parser.add_argument('-N', '--name', dest='name',
            default=None, help='The name of the key')
@staticmethod
def _option_keydir(parser=None):
parser.add_argument('-r', '--keydir', dest='keydir', default=None,
help='The keydir to use, update or search for/in')
@staticmethod
def _option_seedfile(parser=None):
parser.add_argument('-S', '--seedfile', dest='seedfile', default=None,
help='The seedfile to use from the gkeys.conf file')
@staticmethod
def _option_signature(parser=None):
parser.add_argument('-s','--signature', dest='signature', default=None,
help='The path/URL to use for the signature')
@staticmethod
def _option_spec(parser=None):
parser.add_argument('-S', '--spec', dest='spec', default=None,
help='The spec file to use from the gkeys-gen.conf file')
@staticmethod
def _option_timestamp(parser=None):
parser.add_argument('-t', '--timestamp', dest='timestamp',
action='store_true', default=False,
help='Turn on timestamp use')
@staticmethod
def _option_uid(parser=None):
parser.add_argument('-u', '--uid', dest='uid', nargs='+', default=None,
help='The user ID, gpg key uid')
@staticmethod
def _option_email(parser=None):
parser.add_argument('-E', '--email', dest='email', default=None,
help='Email parameter for sending email reminders')
@staticmethod
def _option_user(parser=None):
parser.add_argument('-U', '--user', dest='user', default=None,
help='User parameter for service login')
def parse_args(self, argv):
        '''Parse a list of arguments
@param argv: list
@returns argparse.Namespace object
'''
#self.logger.debug('CliBase: parse_args; args: %s' % args)
parser = argparse.ArgumentParser(
prog=self.cli_config['prog'],
description=self.cli_config['description'],
epilog=self.cli_config['epilog'])
# options
parser.add_argument('-c', '--config', dest='config', default=None,
help='The path to an alternate config file')
parser.add_argument('-D', '--debug', default='DEBUG',
choices=list(log_levels),
help='The logging level to set for the logfile')
parser.add_argument('-V', '--version', action = 'version',
version = self.version)
# Add any additional options to the command base
self._add_options(parser, self.cli_config['Base_Options'])
if self.cli_config['Available_Actions']:
subparsers = parser.add_subparsers(
title='Subcommands',
description='Valid subcommands',
help='Additional help')
for name in self.cli_config['Available_Actions']:
actiondoc = self.cli_config['Action_Map'][name]['desc']
try:
text = actiondoc.splitlines()[0]
except AttributeError:
text = ""
action_parser = subparsers.add_parser(
name,
help=text,
description=actiondoc,
formatter_class=argparse.RawDescriptionHelpFormatter)
action_parser.set_defaults(action=name)
options = self.cli_config['Action_Map'][name]['options']
self._add_options(action_parser, options)
parsed_args = parser.parse_args(argv)
action = getattr(parsed_args, 'action', None)
if self.need_Action and not action:
parser.print_usage()
sys.exit(1)
elif action in ['---general---', '----keys-----', '----seeds----']:
parser.print_help()
sys.exit(1)
return parsed_args
def _add_options(self, parser, options):
for opt in options:
getattr(self, '_option_%s' % opt)(parser)
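        # e.g. (illustrative) options=['nick', 'keyring'] dispatches to
        # self._option_nick(parser) and then self._option_keyring(parser).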
def warning_output(self, info):
        ''' We don't want this message to be spammed 4 times every time gkeys is run'''
if "Re-fetch cycle timeout of" not in info:
print(info)
def setup(self, args, configs):
'''Set up the args and configs passed in
@param args: list or argparse.Namespace object
@param configs: list
'''
message = None
if not args:
message = "Main: run; invalid args argument passed in"
if isinstance(args, list):
args = self.parse_args(args)
if args.config:
self.config.defaults['config'] = args.config
self.config.defaults['configdir'] = os.path.dirname(args.config)
if getattr(args, 'email', False):
configs = [self.config.defaults['config'], os.path.abspath(os.path.join(self.config.defaults['configdir'], "email.conf"))]
self.config.read_config(configs)
else:
self.config.read_config()
else:
self.config.read_config(configs)
        # check for permissions and adjust configs accordingly
if not self.config.defaults['homedir']:
self.config.defaults['homedir'] = os.path.expanduser('~')
if not os.access(self.config['logdir'], os.W_OK):
self.config.options['logdir'] = os.path.join(self.config['userconfigdir'], 'logs')
ensure_dirs(self.config.options['logdir'])
# establish our logger and update it in the imported files
self.logger = set_logger(self.cli_config['prog'], self.config['logdir'], args.debug,
dirmode=int(self.config.get_key('permissions', 'directories'),0),
filemask=int(self.config.get_key('permissions', 'files'),0))
self.config.logger = self.logger
if message:
self.logger.error(message)
# now that we have a logger, record the alternate config setting
if args.config:
self.logger.debug("Main: run; Found alternate config request: %s"
% args.config)
self.logger.debug("Main: run; Using config: %s" % self.config['config'])
# check if a -C, --category was input
# if it was, check if the category is listed in the [seeds]
cat = None
if 'category' in args:
cat = args.category
if not self._check_category(cat):
return False
return True
def run(self, args):
'''Run the action selected
        @param args: list of arguments to parse
'''
# establish our actions instance
self.actions = self.cli_config['Actions'](self.config, self.output_results, self.logger)
# run the action
func = getattr(self.actions, '%s'
% self.cli_config['Action_Map'][args.action]['func'])
self.logger.debug('Main: run; Found action: %s' % args.action)
self.logger.debug('Main: run; args: %s' % str(args.__dict__))
success, results = func(args)
if not results:
print("No results found. Check your configuration and that the",
"seed file exists.")
return success
if self.config.options['print_results'] and 'done' not in list(results):
self.output_results(results, '\n Gkey task results:')
return success
@staticmethod
def output_results(results, header=None):
# super simple output for the time being
if header:
print(header)
for msg in results:
if type(msg) in [str, unicode]:
print(' ', msg)
else:
try:
print(unicode("\n").join([x.pretty_print for x in msg]))
except AttributeError:
for x in msg:
print(' ', x)
print()
def output_failed(self, failed):
pass
def _check_category(self, category=None):
'''Checks that the category (seedfile) is listed
in the [seeds] config or defaults['seeds'] section
@param args: configparser instance
@return boolean
'''
available_cats = list(self.config.defaults['seeds'])
if category and category not in available_cats:
self.logger.error("Invalid category or seedfile entered: %s" % category)
self.logger.error("Available categories or seedfiles: %s" % ', '.join(sorted(available_cats)))
return False
return True
|
gentoo/gentoo-keys
|
gkeys/gkeys/base.py
|
Python
|
gpl-2.0
| 14,995
|
[
"Brian"
] |
e8d35516e7a1aa00b6175d0798ef9dfe3ccc2eb1d8a9424b767fbecc326fd395
|
from collections import namedtuple
from django.test import SimpleTestCase, TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.locations.tree_utils import TreeError, assert_no_cycles, expansion_validators
from corehq.apps.locations.bulk_management import (
NewLocationImporter,
LocationTypeStub,
LocationStub,
LocationTreeValidator,
LocationCollection,
)
# These example types and trees mirror the information available in the upload files
FLAT_LOCATION_TYPES = [
# name, code, parent_code, do_delete, shares_cases, view_descendants, expand_from, sync_to, index
# ('name', 'code', 'parent_code', 'shares_cases', 'view_descendants'),
('State', 'state', '', False, False, False, '', '', 0),
('County', 'county', 'state', False, False, True, '', '', 0),
('City', 'city', 'county', False, True, False, '', '', 0),
]
DUPLICATE_TYPE_CODES = [
    # name, code, parent_code, do_delete, shares_cases, view_descendants, expand_from, sync_to, index
('State', 'state', '', False, False, False, '', '', 0),
('County', 'county', 'state', False, False, True, '', '', 0),
('City', 'city', 'county', False, True, False, '', '', 0),
('Other County', 'county', 'state', False, False, True, '', '', 0),
]
CYCLIC_LOCATION_TYPES = [
('State', 'state', '', False, False, False, '', '', 0),
('County', 'county', 'state', False, False, True, '', '', 0),
('City', 'city', 'county', False, True, False, '', '', 0),
# These three cycle:
('Region', 'region', 'village', False, False, False, '', '', 0),
('District', 'district', 'region', False, False, True, '', '', 0),
('Village', 'village', 'district', False, True, False, '', '', 0),
]
# external_id, latitude, longitude, custom_data, uncategorized_data, index
extra_stub_args = ('', '', '', {}, {}, 0)
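
# --- Hedged illustration (not part of the original test module): concatenating
# a location row below with extra_stub_args yields the full 12-field tuple that
# LocationStub is unpacked from in get_validator()/make_collection(), i.e.
# (name, site_code, location_type, parent_code, location_id, do_delete,
#  external_id, latitude, longitude, custom_data, uncategorized_data, index).
# A quick sanity check of that layout (illustrative only):
assert ('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args == \
    ('Massachusetts', 'mass', 'state', '', '1234', False, '', '', '', {}, {}, 0)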
BASIC_LOCATION_TREE = [
# (name, site_code, location_type, parent_code, location_id,
# do_delete, external_id, latitude, longitude, index)
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
('Suffolk', 'suffolk', 'county', 'mass', '2345', False) + extra_stub_args,
('Boston', 'boston', 'city', 'suffolk', '2346', False) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('Florida', 'florida', 'state', '', '5432', False) + extra_stub_args,
('Duval', 'duval', 'county', 'florida', '5433', False) + extra_stub_args,
('Jacksonville', 'jacksonville', 'city', 'duval', '5434', False) + extra_stub_args,
]
MOVE_SUFFOLK_TO_FLORIDA = [
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
# this is the only changed line (parent is changed to florida)
('Suffolk', 'suffolk', 'county', 'florida', '2345', False) + extra_stub_args,
('Boston', 'boston', 'city', 'suffolk', '2346', False) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('Florida', 'florida', 'state', '', '5432', False) + extra_stub_args,
('Duval', 'duval', 'county', 'florida', '5433', False) + extra_stub_args,
('Jacksonville', 'jacksonville', 'city', 'duval', '5434', False) + extra_stub_args,
]
DELETE_SUFFOLK = [
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
# These next two are marked as 'delete'
('Suffolk', 'suffolk', 'county', 'mass', '2345', True) + extra_stub_args,
('Boston', 'boston', 'city', 'suffolk', '2346', True) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('Florida', 'florida', 'state', '', '5432', False) + extra_stub_args,
('Duval', 'duval', 'county', 'florida', '5433', False) + extra_stub_args,
('Jacksonville', 'jacksonville', 'city', 'duval', '5434', False) + extra_stub_args,
]
MAKE_SUFFOLK_A_STATE_INVALID = [
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
# This still lists mass as a parent, which is invalid,
# plus, Boston (a city), can't have a state as a parent
('Suffolk', 'suffolk', 'state', 'mass', '2345', False) + extra_stub_args,
('Boston', 'boston', 'city', 'suffolk', '2346', False) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('Florida', 'florida', 'state', '', '5432', False) + extra_stub_args,
('Duval', 'duval', 'county', 'florida', '5433', False) + extra_stub_args,
('Jacksonville', 'jacksonville', 'city', 'duval', '5434', False) + extra_stub_args,
]
MAKE_SUFFOLK_A_STATE_VALID = [
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
('Suffolk', 'suffolk', 'state', '', '2345', False) + extra_stub_args,
('Boston', 'boston', 'county', 'suffolk', '2346', False) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('Florida', 'florida', 'state', '', '5432', False) + extra_stub_args,
('Duval', 'duval', 'county', 'florida', '5433', False) + extra_stub_args,
('Jacksonville', 'jacksonville', 'city', 'duval', '5434', False) + extra_stub_args,
]
DUPLICATE_SITE_CODES = [
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
('Suffolk', 'suffolk', 'county', 'mass', '2345', False) + extra_stub_args,
('Boston', 'boston', 'city', 'suffolk', '2346', False) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('East Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
]
SAME_NAME_SAME_PARENT = [
('Massachusetts', 'mass', 'state', '', '1234', False) + extra_stub_args,
('Middlesex', 'middlesex', 'county', 'mass', '3456', False) + extra_stub_args,
# These two locations have the same name AND same parent
('Cambridge', 'cambridge', 'city', 'middlesex', '3457', False) + extra_stub_args,
('Cambridge', 'cambridge2', 'city', 'middlesex', '3458', False) + extra_stub_args,
]
class TestTreeUtils(SimpleTestCase):
def test_no_issues(self):
assert_no_cycles([
("State", 'TOP'),
("County", "State"),
("City", "County"),
("Region", "State"),
("District", "Region"),
])
def test_bad_parent_ref(self):
with self.assertRaises(TreeError) as e:
assert_no_cycles([
("County", "State"), # State doesn't exist
("City", "County"),
("Region", "State"), # State doesn't exist
("District", "Region"),
])
self.assertItemsEqual(
e.exception.affected_nodes,
["County", "Region"]
)
def test_has_cycle(self):
with self.assertRaises(TreeError) as e:
assert_no_cycles([
("State", 'TOP'),
("County", "State"),
("City", "County"),
# These three cycle:
("Region", "Village"),
("District", "Region"),
("Village", "District"),
])
self.assertItemsEqual(
e.exception.affected_nodes,
["Region", "District", "Village"]
)
def test_expansion_validators(self):
# a, b are TOP. a has c,d as children, b has e as child
from_validator, to_validator = expansion_validators(
[('a', 'TOP'), ('b', 'TOP'), ('c', 'a'), ('d', 'a'), ('e', 'b')]
)
self.assertEqual(set(from_validator('a')), set(['a', 'TOP']))
self.assertEqual(set(from_validator('b')), set(['b', 'TOP']))
self.assertEqual(set(from_validator('c')), set(['c', 'a', 'TOP']))
self.assertEqual(set(from_validator('d')), set(['d', 'a', 'TOP']))
self.assertEqual(set(from_validator('e')), set(['e', 'b', 'TOP']))
self.assertEqual(set(to_validator('a')), set(['a', 'c', 'd']))
self.assertEqual(set(to_validator('b')), set(['b', 'e']))
self.assertEqual(set(to_validator('c')), set(['c']))
self.assertEqual(set(to_validator('d')), set(['d']))
self.assertEqual(set(to_validator('e')), set(['e']))
# a is TOP. a has b as child, b has c as child
from_validator, to_validator = expansion_validators(
[('a', 'TOP'), ('b', 'a'), ('c', 'b')]
)
self.assertEqual(set(from_validator('a')), set(['a', 'TOP']))
self.assertEqual(set(from_validator('b')), set(['a', 'b', 'TOP']))
self.assertEqual(set(from_validator('c')), set(['a', 'b', 'c', 'TOP']))
self.assertEqual(set(to_validator('a')), set(['a', 'b', 'c']))
self.assertEqual(set(to_validator('b')), set(['b', 'c']))
self.assertEqual(set(to_validator('c')), set(['c']))
def get_validator(location_types, locations, old_collection=None):
validator = LocationTreeValidator(
[LocationTypeStub(*loc_type) for loc_type in location_types],
[LocationStub(*loc) for loc in locations],
old_collection=old_collection
)
return validator
MockCollection = namedtuple(
'MockCollection',
'types locations locations_by_id locations_by_site_code domain_name custom_data_validator')
def make_collection(types, locations):
types = [LocationTypeStub(*loc_type) for loc_type in types]
locations = [LocationStub(*loc) for loc in locations]
return MockCollection(
types=types,
locations=locations,
locations_by_id={l.location_id: l for l in locations},
locations_by_site_code={l.site_code: l for l in locations},
custom_data_validator=None,
domain_name='location-bulk-management',
)
class TestTreeValidator(SimpleTestCase):
def test_good_location_set(self):
validator = get_validator(FLAT_LOCATION_TYPES, BASIC_LOCATION_TREE)
self.assertEqual(len(validator.errors), 0)
def test_cyclic_location_types(self):
validator = get_validator(CYCLIC_LOCATION_TYPES, BASIC_LOCATION_TREE)
self.assertEqual(len(validator._validate_types_tree()), 3)
def test_bad_type_change(self):
validator = get_validator(FLAT_LOCATION_TYPES, MAKE_SUFFOLK_A_STATE_INVALID)
all_errors = validator.errors
self.assertEqual(len(all_errors), 2)
tree_errors = validator._validate_location_tree()
self.assertEqual(len(tree_errors), 2)
def test_good_type_change(self):
validator = get_validator(FLAT_LOCATION_TYPES, MAKE_SUFFOLK_A_STATE_VALID)
errors = validator.errors
self.assertEqual(len(errors), 0)
def test_duplicate_type_codes(self):
validator = get_validator(DUPLICATE_TYPE_CODES, BASIC_LOCATION_TREE)
errors = validator.errors
type_errors = validator._check_unique_type_codes()
self.assertEqual(len(errors), 1)
self.assertEqual(len(type_errors), 1)
self.assertIn("county", errors[0])
def test_valid_expansions(self):
validator = get_validator(
[
# name, code, parent_code, do_delete, shares_cases, view_descendants, expand_from, sync_to, index
# empty from, descendant as to
('A', 'a', '', False, False, False, '', 'd', 0),
# itself as from, descendant as to
('B', 'b', '', False, False, False, 'b', 'e', 0),
# empty to, parentage as from
('C', 'c', 'a', False, False, False, 'a', '', 0),
# itself as to, parentage as from
('D', 'd', 'a', False, False, False, 'a', 'd', 0),
# parentage as from, empty to
('E', 'e', 'b', False, False, False, 'b', '', 0),
],
[]
)
errors = validator.errors
self.assertEqual(errors, [])
def test_invalid_expansions(self):
validator = get_validator(
[
# name, code, parent_code, do_delete, shares_cases, view_descendants, expand_from, sync_to, index
('A', 'a', '', False, False, False, '', 'd', 0),
# 'a' is not a descendant of 'b'
('B', 'b', '', False, False, False, 'b', 'a', 0),
('C', 'c', 'a', False, False, False, 'a', '', 0),
# 'b' doesn't occur in its parentage
('D', 'd', 'a', False, False, False, 'b', 'd', 0),
('E', 'e', 'b', False, False, False, 'b', '', 0),
],
[]
)
errors = validator.errors
self.assertEqual(len(errors), 2)
def test_duplicate_location(self):
validator = get_validator(FLAT_LOCATION_TYPES, DUPLICATE_SITE_CODES)
errors = validator.errors
self.assertEqual(len(errors), 2)
self.assertEqual(len(validator._check_unique_location_codes()), 1)
self.assertEqual(len(validator._check_unique_location_ids()), 1)
self.assertIn("cambridge", errors[0])
def test_same_name_same_parent(self):
validator = get_validator(FLAT_LOCATION_TYPES, SAME_NAME_SAME_PARENT)
errors = validator.errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(validator._check_location_names()), 1)
self.assertIn("middlesex", errors[0])
def test_missing_types(self):
# all types in the domain should be listed in given excel
old_types = FLAT_LOCATION_TYPES + [('Galaxy', 'galaxy', '', False, False, False, '', '', 0)]
old_collection = make_collection(old_types, BASIC_LOCATION_TREE)
validator = get_validator(FLAT_LOCATION_TYPES, BASIC_LOCATION_TREE, old_collection)
missing_type_errors = validator._check_unlisted_type_codes()
self.assertEqual(len(missing_type_errors), 1)
self.assertEqual(len(validator.errors), 1)
self.assertIn('galaxy', missing_type_errors[0])
def test_missing_location_ids(self):
# all locations in the domain should be listed in given excel
old_locations = (
BASIC_LOCATION_TREE +
[('extra_state', 'ex_code', 'state', '', 'ex_id', False) + extra_stub_args]
)
old_collection = make_collection(FLAT_LOCATION_TYPES, old_locations)
validator = get_validator(FLAT_LOCATION_TYPES, BASIC_LOCATION_TREE, old_collection)
missing_locations = validator._check_unlisted_location_ids()
self.assertEqual(len(missing_locations), 1)
self.assertEqual(len(validator.errors), 1)
self.assertIn('extra_state', missing_locations[0])
def test_unknown_location_ids(self):
        # all location_ids in the given excel should already exist in the domain
old_collection = make_collection(FLAT_LOCATION_TYPES, BASIC_LOCATION_TREE)
new_locations = (
BASIC_LOCATION_TREE +
[('extra_state', 'ex_code', 'state', '', 'ex_id', False) + extra_stub_args]
)
validator = get_validator(FLAT_LOCATION_TYPES, new_locations, old_collection)
unknown_locations = validator._check_unknown_location_ids()
self.assertEqual(len(unknown_locations), 1)
self.assertEqual(len(validator.errors), 1)
self.assertIn('ex_id', unknown_locations[0])
class TestBulkManagement(TestCase):
basic_tree = [
# (name, site_code, location_type, parent_code, location_id,
# do_delete, external_id, latitude, longitude, index)
('S1', 's1', 'state', '', '', False) + extra_stub_args,
('S2', 's2', 'state', '', '', False) + extra_stub_args,
('County11', 'county11', 'county', 's1', '', False) + extra_stub_args,
('County21', 'county21', 'county', 's2', '', False) + extra_stub_args,
('City111', 'city111', 'city', 'county11', '', False) + extra_stub_args,
('City112', 'city112', 'city', 'county11', '', False) + extra_stub_args,
('City211', 'city211', 'city', 'county21', '', False) + extra_stub_args,
]
@classmethod
def as_pairs(cls, tree):
pairs = []
for l in tree:
code = l[1]
parent_code = l[3] or None
do_delete = l[5]
if not do_delete:
pairs.append((code, parent_code))
return set(pairs)
def setUp(self):
super(TestBulkManagement, self).setUp()
self.domain = create_domain('location-bulk-management')
def tearDown(self):
super(TestBulkManagement, self).tearDown()
# domain delete cascades to everything else
self.domain.delete()
def create_location_types(self, location_types):
def _make_loc_type(name, code, parent_code, _delete, shares_cases, view_descendants,
expand_from, sync_to, _i, parent_type=None):
return LocationType.objects.create(
domain=self.domain.name,
name=name,
code=code,
parent_type=parent_type,
shares_cases=shares_cases,
view_descendants=view_descendants
)
lt_by_code = {}
for lt in location_types:
code = lt[1]
parent_code = lt[2]
parent_type = lt_by_code.get(parent_code)
location_type = _make_loc_type(*lt, parent_type=parent_type)
lt_by_code[code] = location_type
return lt_by_code
def create_locations(self, locations, lt_by_code):
def _make_loc(name, site_code, location_type, parent_code, location_id,
do_delete, external_id, latitude, longitude, custom_data, uncategorized_data,
index, parent=None):
_type = lt_by_code.get(location_type)
loc = SQLLocation(
site_code=site_code, name=name, domain=self.domain.name, location_type=_type,
parent=parent,
)
loc.save()
return loc
locations_by_code = {}
for l in locations:
code = l[1]
parent_code = l[3]
parent = locations_by_code.get(parent_code)
location = _make_loc(*l, parent=parent)
locations_by_code[code] = location
return locations_by_code
def bulk_update_locations(self, types, locations):
importer = NewLocationImporter(
self.domain.name,
[LocationTypeStub(*loc_type) for loc_type in types],
[LocationStub(*loc) for loc in locations],
)
result = importer.run()
return result
def assertLocationTypesMatch(self, expected_types):
# Makes sure that the set of all location types in the domain matches
# the passed-in location types
actual_types = self.domain.location_types
        # convert it to the format of the passed-in tuples
actual = [
(lt.name, lt.code,
lt.parent_type.code if lt.parent_type else '', False, lt.shares_cases or False,
lt.view_descendants, lt.expand_from.code if lt.expand_from else '',
lt.expand_to.code if lt.expand_to else '')
for lt in actual_types
]
expected = []
for lt in expected_types:
do_delete = lt[3]
if not do_delete:
# drop index
expected.append(tuple(lt[0:-1]))
self.assertEqual(set(actual), set(expected))
def assertLocationsMatch(self, expected_locations, check_attr='site_code'):
collection = LocationCollection(self.domain)
actual = []
for l in collection.locations:
attr = getattr(l, check_attr)
if l.parent:
parent = l.parent.site_code
else:
parent = None
actual.append((attr, parent))
self.assertEqual(set(actual), expected_locations)
self.assertMpttDescendants(expected_locations)
def assertMpttDescendants(self, pairs):
# Given list of (child, parent), check that for each location
# SQLLocation.get_descendants is same as calculated descendants
from collections import defaultdict
# index by parent, to calculate descendants
by_parent = defaultdict(list)
for (child, parent) in pairs:
by_parent[parent].append(child)
descendants = defaultdict(list)
def get_descendants(l):
if descendants[l]:
return descendants[l]
to_ret = []
children = by_parent[l]
for child in children:
to_ret = to_ret + get_descendants(child)
return children + to_ret
# calculate descendants for each location
for (child, pair) in pairs:
descendants[child] = get_descendants(child)
# for each location assert that calculated and expected get_descendants are equal
for (l, desc) in descendants.iteritems():
q = SQLLocation.objects.filter(site_code=l)
loc = q[0] if q else None
actual = [i.site_code for i in loc.get_descendants()] if loc else []
self.assertEqual(set(actual), set(desc))
def assertCouchSync(self):
def assertLocationsEqual(loc1, loc2):
fields = ["domain", "name", "location_id", "location_type_name",
"site_code", "external_id", "metadata", "is_archived"]
for field in fields:
msg = "The locations have different values for '{}'".format(field)
self.assertEqual(getattr(loc1, field), getattr(loc2, field), msg)
def get_parent(loc):
return loc.parent.location_id if loc.parent else None
self.assertEqual(get_parent(loc1), get_parent(loc2))
collection = LocationCollection(self.domain)
for loc in collection.locations:
assertLocationsEqual(loc, loc.couch_location)
def test_location_creation(self):
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
self.basic_tree
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertCouchSync()
def test_data_format(self):
data = [
('S1', '1', 'state', '', '', False, '12', 'not-lat', '2345', {}, {}, 0),
('S2', '2', 'state', '', '', False, '12', '3434', '2345', {}, {}, 0),
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
data
)
self.assertEqual(len(result.errors), 1)
self.assertTrue('lat' in result.errors[0])
def test_move_county21_to_state1(self):
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
locations_by_code = self.create_locations(self.basic_tree, lt_by_code)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
_loc_id = lambda x: locations_by_code[x].location_id
move_county21_to_state1 = [
# (name, site_code, location_type, parent_code, location_id,
# do_delete, external_id, latitude, longitude, index)
('S1', 's1', 'state', '', _loc_id('s1'), False) + extra_stub_args,
('S2', 's2', 'state', '', _loc_id('s2'), False) + extra_stub_args,
('County11', 'county11', 'county', 's1', _loc_id('county11'), False) + extra_stub_args,
# change parent_code from s2 -> s1
('County21', 'county21', 'county', 's1', _loc_id('county21'), False) + extra_stub_args,
('City111', 'city111', 'city', 'county11', _loc_id('city111'), False) + extra_stub_args,
('City112', 'city112', 'city', 'county11', _loc_id('city112'), False) + extra_stub_args,
('City211', 'city211', 'city', 'county21', _loc_id('city211'), False) + extra_stub_args,
# create new city
('City311', 'city311', 'city', 'county11', '', False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
move_county21_to_state1,
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(move_county21_to_state1))
self.assertCouchSync()
def test_delete_county11(self):
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
locations_by_code = self.create_locations(self.basic_tree, lt_by_code)
_loc_id = lambda x: locations_by_code[x].location_id
delete_county11 = [
('S1', 's1', 'state', '', _loc_id('s1'), False) + extra_stub_args,
('S2', 's2', 'state', '', _loc_id('s2'), False) + extra_stub_args,
('County11', 'county11', 'county', 's1', _loc_id('county11'), True) + extra_stub_args,
('County21', 'county21', 'county', 's2', _loc_id('county21'), False) + extra_stub_args,
('City111', 'city111', 'city', 'county11', _loc_id('city111'), True) + extra_stub_args,
('City112', 'city112', 'city', 'county11', _loc_id('city112'), True) + extra_stub_args,
('City211', 'city211', 'city', 'county21', _loc_id('city211'), False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
delete_county11,
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(delete_county11))
self.assertCouchSync()
def test_invalid_tree(self):
# Invalid location upload should not pass or affect existing location structure
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
locations_by_code = self.create_locations(self.basic_tree, lt_by_code)
_loc_id = lambda x: locations_by_code[x].location_id
delete_s2 = [
('S1', 's1', 'state', '', _loc_id('s1'), False) + extra_stub_args,
            # delete s2, but don't delete its descendants. This is invalid
('S2', 's2', 'state', '', _loc_id('s2'), True) + extra_stub_args,
('County11', 'county11', 'county', 's1', _loc_id('county11'), False) + extra_stub_args,
('County21', 'county21', 'county', 's2', _loc_id('county21'), False) + extra_stub_args,
('City111', 'city111', 'city', 'county11', _loc_id('city111'), False) + extra_stub_args,
('City112', 'city112', 'city', 'county11', _loc_id('city112'), False) + extra_stub_args,
('City211', 'city211', 'city', 'county21', _loc_id('city211'), False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
delete_s2,
)
self.assertNotEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
# Since there were errors, the location tree should be as it was
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertCouchSync()
def test_edit_by_location_id(self):
# Locations can be referred by location_id and empty site_code
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
locations_by_code = self.create_locations(self.basic_tree, lt_by_code)
_loc_id = lambda x: locations_by_code[x].location_id
move_county21_to_state1 = [
('S1', '', 'state', '', _loc_id('s1'), False) + extra_stub_args,
('S2', '', 'state', '', _loc_id('s2'), False) + extra_stub_args,
('County11', '', 'county', 's1', _loc_id('county11'), False) + extra_stub_args,
('County21', '', 'county', 's1', _loc_id('county21'), False) + extra_stub_args,
('City111', '', 'city', 'county11', _loc_id('city111'), False) + extra_stub_args,
('City112', '', 'city', 'county11', _loc_id('city112'), False) + extra_stub_args,
('City211', '', 'city', 'county21', _loc_id('city211'), False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES, # No change to types
move_county21_to_state1, # This is the desired end result
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(set([
('s1', None), ('s2', None), ('county11', 's1'), ('county21', 's1'),
('city111', 'county11'), ('city112', 'county11'), ('city211', 'county21')
]))
self.assertCouchSync()
def test_edit_by_sitecode(self):
# Locations can be referred by site_code and empty location_id
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
move_county21_to_state1 = [
('S1', 's1', 'state', '', '', False) + extra_stub_args,
('S2', 's2', 'state', '', '', False) + extra_stub_args,
('County11', 'county11', 'county', 's1', '', False) + extra_stub_args,
# change parent_code from s2 -> s1
('County21', 'county21', 'county', 's1', '', False) + extra_stub_args,
('City111', 'city111', 'city', 'county11', '', False) + extra_stub_args,
('City112', 'city112', 'city', 'county11', '', False) + extra_stub_args,
('City211', 'city211', 'city', 'county21', '', False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES, # No change to types
move_county21_to_state1, # This is the desired end result
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(move_county21_to_state1))
self.assertCouchSync()
def test_delete_city_type_valid(self):
# delete a location type and locations of that type
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
delete_city_types = [
('State', 'state', '', False, False, False, '', '', 0),
('County', 'county', 'state', False, False, True, '', '', 0),
('City', 'city', 'county', True, True, False, '', '', 0),
]
delete_cities_locations = [
('S1', 's1', 'state', '', '', False) + extra_stub_args,
('S2', 's2', 'state', '', '', False) + extra_stub_args,
('County11', 'county11', 'county', 's1', '', False) + extra_stub_args,
('County21', 'county21', 'county', 's2', '', False) + extra_stub_args,
# delete locations of type 'city'
('City111', 'city111', 'city', 'county11', '', True) + extra_stub_args,
('City112', 'city112', 'city', 'county11', '', True) + extra_stub_args,
('City211', 'city211', 'city', 'county21', '', True) + extra_stub_args,
]
result = self.bulk_update_locations(
            delete_city_types,  # delete the 'city' location type
delete_cities_locations, # This is the desired end result
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(delete_city_types)
self.assertLocationsMatch(self.as_pairs(delete_cities_locations))
self.assertCouchSync()
def test_delete_everything(self):
# delete everything
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
delete_city_types = [
('State', 'state', '', True, False, False, '', '', 0),
('County', 'county', 'state', True, False, True, '', '', 0),
('City', 'city', 'county', True, True, False, '', '', 0),
]
delete_cities_locations = [
('S1', 's1', 'state', '', '', True) + extra_stub_args,
('S2', 's2', 'state', '', '', True) + extra_stub_args,
('County11', 'county11', 'county', 's1', '', True) + extra_stub_args,
('County21', 'county21', 'county', 's2', '', True) + extra_stub_args,
('City111', 'city111', 'city', 'county11', '', True) + extra_stub_args,
('City112', 'city112', 'city', 'county11', '', True) + extra_stub_args,
('City211', 'city211', 'city', 'county21', '', True) + extra_stub_args,
]
result = self.bulk_update_locations(
            delete_city_types,  # all three types marked for deletion
delete_cities_locations, # This is the desired end result
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(delete_city_types)
self.assertLocationsMatch(self.as_pairs(delete_cities_locations))
self.assertCouchSync()
def test_delete_city_type_invalid(self):
# delete a location type but don't delete locations of that type.
# this is invalid upload and should not go through
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
delete_city_types = [
('State', 'state', '', False, False, False, '', '', 0),
('County', 'county', 'state', False, False, True, '', '', 0),
('City', 'city', 'county', True, True, False, '', '', 0),
]
result = self.bulk_update_locations(
delete_city_types, # delete city type
self.basic_tree, # but don't delete locations of city type
)
self.assertNotEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertCouchSync()
def test_edit_names(self):
# metadata attributes like 'name' can be updated
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
locations_by_code = self.create_locations(self.basic_tree, lt_by_code)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertCouchSync()
_loc_id = lambda x: locations_by_code[x].location_id
change_names = [
# (name, site_code, location_type, parent_code, location_id,
# do_delete, external_id, latitude, longitude, index)
# changing names
('State 1', '', 'state', '', _loc_id('s1'), False) + extra_stub_args,
('State 2', '', 'state', '', _loc_id('s2'), False) + extra_stub_args,
('County 11', '', 'county', 's1', _loc_id('county11'), False) + extra_stub_args,
('County 21', '', 'county', 's2', _loc_id('county21'), False) + extra_stub_args,
('City 111', '', 'city', 'county11', _loc_id('city111'), False) + extra_stub_args,
('City 112', '', 'city', 'county11', _loc_id('city112'), False) + extra_stub_args,
('City 211', '', 'city', 'county21', _loc_id('city211'), False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
change_names,
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertLocationsMatch(set([
('State 1', None), ('State 2', None), ('County 11', 's1'), ('County 21', 's2'),
('City 111', 'county11'), ('City 112', 'county11'), ('City 211', 'county21')
]), check_attr='name')
self.assertCouchSync()
def test_partial_type_edit(self):
# edit a subset of types
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
edit_types = [
('State', 'state', '', False, False, False, '', '', 0),
# change name of this type
('District', 'county', 'state', False, False, False, '', '', 0),
('City', 'city', 'county', False, False, False, '', '', 0),
]
result = self.bulk_update_locations(
edit_types,
self.basic_tree,
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(edit_types)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertCouchSync()
def test_edit_expansions(self):
# 'expand_from', 'expand_to' can be updated
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
edit_expansions = [
('State', 'state', '', False, False, False, '', 'city', 0),
('County', 'county', 'state', False, False, False, '', '', 0),
('City', 'city', 'county', False, False, False, 'county', '', 0),
]
result = self.bulk_update_locations(
edit_expansions,
self.basic_tree,
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(edit_expansions)
self.assertLocationsMatch(self.as_pairs(self.basic_tree))
self.assertCouchSync()
def test_rearrange_locations(self):
# a total rearrangement like reversing the tree can be done
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
self.create_locations(self.basic_tree, lt_by_code)
reverse_order = [
('State', 'state', 'county', False, False, False, '', '', 0),
('County', 'county', 'city', False, False, False, '', '', 0),
('City', 'city', '', False, False, False, '', '', 0),
]
edit_types_of_locations = [
# change parent from TOP to county
('S1', 's1', 'state', 'county11', '', False) + extra_stub_args,
('S2', 's2', 'state', 'county11', '', False) + extra_stub_args,
# change parent from state to city
('County11', 'county11', 'county', 'city111', '', False) + extra_stub_args,
('County21', 'county21', 'county', 'city111', '', False) + extra_stub_args,
# make these two TOP locations
('City111', 'city111', 'city', '', '', False) + extra_stub_args,
('City112', 'city112', 'city', '', '', False) + extra_stub_args,
# delete this
('City211', 'city211', 'city', 'county21', '', True) + extra_stub_args,
]
result = self.bulk_update_locations(
            reverse_order,  # reversed type hierarchy
edit_types_of_locations, # This is the desired end result
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(reverse_order)
self.assertLocationsMatch(self.as_pairs(edit_types_of_locations))
self.assertCouchSync()
def test_swap_parents(self):
lt_by_code = self.create_location_types(FLAT_LOCATION_TYPES)
original = [
('State 1', 's1', 'state', '', '', False) + extra_stub_args,
('State 2', 's2', 'state', '', '', False) + extra_stub_args,
('County 11', 'c1', 'county', 's1', '', False) + extra_stub_args,
('County 21', 'c2', 'county', 's2', '', False) + extra_stub_args,
]
self.create_locations(original, lt_by_code)
swap_parents = [
('State 1', 's1', 'state', '', '', False) + extra_stub_args,
('State 2', 's2', 'state', '', '', False) + extra_stub_args,
('County 11', 'c1', 'county', 's2', '', False) + extra_stub_args,
('County 21', 'c2', 'county', 's1', '', False) + extra_stub_args,
]
result = self.bulk_update_locations(
FLAT_LOCATION_TYPES,
swap_parents,
)
self.assertEqual(result.errors, [])
self.assertLocationTypesMatch(FLAT_LOCATION_TYPES)
self.assertLocationsMatch(self.as_pairs(swap_parents))
self.assertCouchSync()
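
# --- Hedged illustration (not part of the original test module): a standalone
# sketch of the descendant calculation that assertMpttDescendants above derives
# from (child, parent) pairs, independent of any SQLLocation queries.
def _example_descendants_from_pairs(pairs):
    from collections import defaultdict
    by_parent = defaultdict(list)
    for child, parent in pairs:
        by_parent[parent].append(child)

    def get_descendants(node):
        children = by_parent[node]
        result = list(children)
        for child in children:
            result.extend(get_descendants(child))
        return result

    return {child: get_descendants(child) for child, _ in pairs}

# e.g. _example_descendants_from_pairs([('s1', None), ('county11', 's1'),
#                                       ('city111', 'county11')])
# returns {'s1': ['county11', 'city111'], 'county11': ['city111'], 'city111': []}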
|
qedsoftware/commcare-hq
|
corehq/apps/locations/tests/test_bulk_management.py
|
Python
|
bsd-3-clause
| 40,566
|
[
"Galaxy"
] |
1ec47bba318b91aa1f73a9376f3a4c6fb7ae807de15f089c2f09afb17fb862f6
|
import numpy as np
import os
import os.path as op
from os.path import join as pjoin
import re
import shutil
import subprocess
from nose.tools import assert_equal
from numpy.testing import assert_raises, assert_array_equal
from tempfile import mkdtemp, mktemp
import nibabel as nib
from surfer import Brain, io, utils
from surfer.utils import requires_ffmpeg, requires_fsaverage
from mayavi import mlab
subj_dir = utils._get_subjects_dir()
subject_id = 'fsaverage'
std_args = [subject_id, 'lh', 'inflated']
data_dir = pjoin(op.dirname(__file__), '..', '..', 'examples', 'example_data')
overlay_fname = pjoin(data_dir, 'lh.sig.nii.gz')
def has_freesurfer():
if 'FREESURFER_HOME' not in os.environ:
return False
else:
return True
requires_fs = np.testing.dec.skipif(not has_freesurfer(),
'Requires FreeSurfer command line tools')
@requires_fsaverage
def test_offscreen():
"""Test offscreen rendering
"""
mlab.options.backend = 'auto'
brain = Brain(*std_args, offscreen=True)
shot = brain.screenshot()
assert_array_equal(shot.shape, (800, 800, 3))
brain.close()
@requires_fsaverage
def test_image():
"""Test image saving
"""
tmp_name = mktemp() + '.png'
mlab.options.backend = 'auto'
subject_id, _, surf = std_args
brain = Brain(subject_id, 'both', surf=surf, size=100)
brain.add_overlay(overlay_fname, hemi='lh', min=5, max=20, sign="pos")
brain.save_imageset(tmp_name, ['med', 'lat'], 'jpg')
brain = Brain(*std_args, size=100)
brain.save_image(tmp_name)
brain.save_montage(tmp_name, ['l', 'v', 'm'], orientation='v')
brain.save_montage(tmp_name, ['l', 'v', 'm'], orientation='h')
brain.save_montage(tmp_name, [['l', 'v'], ['m', 'f']])
brain.screenshot()
brain.close()
@requires_fsaverage
def test_brains():
"""Test plotting of Brain with different arguments
"""
# testing backend breaks when passing in a figure, so we use 'auto' here
# (shouldn't affect usability, but it makes testing more annoying)
mlab.options.backend = 'auto'
surfs = ['inflated', 'white']
hemis = ['lh', 'rh']
curvs = [True, False]
titles = [None, 'Hello']
cortices = ["low_contrast", ("Reds", 0, 1, False)]
sizes = [500, (400, 300)]
backgrounds = ["white", "blue"]
foregrounds = ["black", "white"]
figs = [None, mlab.figure()]
subj_dirs = [None, subj_dir]
for surf, hemi, curv, title, cort, s, bg, fg, fig, sd \
in zip(surfs, hemis, curvs, titles, cortices, sizes,
backgrounds, foregrounds, figs, subj_dirs):
brain = Brain(subject_id, hemi, surf, curv, title,
cort, s, bg, fg, fig, sd)
brain.close()
assert_raises(ValueError, Brain, subject_id, 'lh', 'inflated',
subjects_dir='')
@requires_fsaverage
def test_annot():
"""Test plotting of annot
"""
mlab.options.backend = 'test'
annots = ['aparc', 'aparc.a2005s']
borders = [True, False, 2]
alphas = [1, 0.5]
brain = Brain(*std_args)
for a, b, p in zip(annots, borders, alphas):
brain.add_annotation(a, b, p)
assert_raises(ValueError, brain.add_annotation, 'aparc', borders=-1)
brain.close()
@requires_fsaverage
def test_contour():
"""Test plotting of contour overlay
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
brain.add_contour_overlay(overlay_file)
brain.add_contour_overlay(overlay_file, max=20, n_contours=9,
line_width=2)
brain.contour['surface'].actor.property.line_width = 1
brain.contour['surface'].contour.number_of_contours = 10
brain.close()
@requires_fsaverage
@requires_fs
def test_data():
"""Test plotting of data
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
mri_file = pjoin(data_dir, 'resting_corr.nii.gz')
reg_file = pjoin(data_dir, 'register.dat')
surf_data = io.project_volume_data(mri_file, "lh", reg_file)
brain.add_data(surf_data, -.7, .7, colormap="jet", alpha=.7)
brain.close()
@requires_fsaverage
def test_foci():
"""Test plotting of foci
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
coords = [[-36, 18, -3],
[-43, 25, 24],
[-48, 26, -2]]
brain.add_foci(coords, map_surface="white", color="gold")
annot_path = pjoin(subj_dir, subject_id, 'label', 'lh.aparc.a2009s.annot')
ids, ctab, names = nib.freesurfer.read_annot(annot_path)
verts = np.arange(0, len(ids))
coords = np.random.permutation(verts[ids == 74])[:10]
scale_factor = 0.7
brain.add_foci(coords, coords_as_verts=True,
scale_factor=scale_factor, color="#A52A2A")
brain.close()
@requires_fsaverage
def test_label():
"""Test plotting of label
"""
mlab.options.backend = 'test'
subject_id = "fsaverage"
hemi = "lh"
surf = "inflated"
brain = Brain(subject_id, hemi, surf)
brain.add_label("BA1")
brain.add_label("BA1", color="blue", scalar_thresh=.5)
label_file = pjoin(subj_dir, subject_id,
"label", "%s.MT.label" % hemi)
brain.add_label(label_file)
brain.add_label("BA44", borders=True)
brain.add_label("BA6", alpha=.7)
brain.show_view("medial")
brain.add_label("V1", color="steelblue", alpha=.6)
brain.add_label("V2", color="#FF6347", alpha=.6)
brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
brain.close()
@requires_fsaverage
def test_meg_inverse():
"""Test plotting of MEG inverse solution
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
stc = io.read_stc(stc_fname)
data = stc['data']
vertices = stc['vertices']
time = 1e3 * np.linspace(stc['tmin'],
stc['tmin'] + data.shape[1] * stc['tstep'],
data.shape[1])
colormap = 'hot'
time_label = 'time=%0.2f ms'
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=10, time=time, time_label=time_label)
brain.set_data_time_index(2)
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
# viewer = TimeViewer(brain)
brain.close()
@requires_fsaverage
def test_morphometry():
"""Test plotting of morphometry
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
brain.add_morphometry("curv")
brain.add_morphometry("sulc", grayscale=True)
brain.add_morphometry("thickness")
brain.close()
@requires_ffmpeg
@requires_fsaverage
def test_movie():
"""Test saving a movie of an MEG inverse solution
"""
# create and setup the Brain instance
mlab.options.backend = 'auto'
brain = Brain(*std_args)
stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
stc = io.read_stc(stc_fname)
data = stc['data']
time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
brain.add_data(data, colormap='hot', vertices=stc['vertices'],
smoothing_steps=10, time=time, time_label='time=%0.2f ms')
brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)
# save movies with different options
tempdir = mkdtemp()
try:
dst = os.path.join(tempdir, 'test.mov')
brain.save_movie(dst)
brain.save_movie(dst, tmin=0.081, tmax=0.102)
# test the number of frames in the movie
sp = subprocess.Popen(('ffmpeg', '-i', 'test.mov', '-vcodec', 'copy',
'-f', 'null', '/dev/null'), cwd=tempdir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = sp.communicate()
m = re.search('frame=\s*(\d+)\s', stderr)
if not m:
raise RuntimeError(stderr)
n_frames = int(m.group(1))
assert_equal(n_frames, 3)
finally:
# clean up
shutil.rmtree(tempdir)
brain.close()
@requires_fsaverage
def test_overlay():
"""Test plotting of overlay
"""
mlab.options.backend = 'test'
# basic overlay support
overlay_file = pjoin(data_dir, "lh.sig.nii.gz")
brain = Brain(*std_args)
brain.add_overlay(overlay_file)
brain.overlays["sig"].remove()
brain.add_overlay(overlay_file, min=5, max=20, sign="pos")
sig1 = io.read_scalar_data(pjoin(data_dir, "lh.sig.nii.gz"))
sig2 = io.read_scalar_data(pjoin(data_dir, "lh.alt_sig.nii.gz"))
thresh = 4
sig1[sig1 < thresh] = 0
sig2[sig2 < thresh] = 0
conjunct = np.min(np.vstack((sig1, sig2)), axis=0)
brain.add_overlay(sig1, 4, 30, name="sig1")
brain.overlays["sig1"].pos_bar.lut_mode = "Reds"
brain.overlays["sig1"].pos_bar.visible = False
brain.add_overlay(sig2, 4, 30, name="sig2")
brain.overlays["sig2"].pos_bar.lut_mode = "Blues"
brain.overlays["sig2"].pos_bar.visible = False
brain.add_overlay(conjunct, 4, 30, name="conjunct")
brain.overlays["conjunct"].pos_bar.lut_mode = "Purples"
brain.overlays["conjunct"].pos_bar.visible = False
brain.close()
@requires_fsaverage
def test_probabilistic_labels():
"""Test plotting of probabilistic labels
"""
mlab.options.backend = 'test'
brain = Brain("fsaverage", "lh", "inflated",
cortex="low_contrast")
brain.add_label("BA1", color="darkblue")
brain.add_label("BA1", color="dodgerblue", scalar_thresh=.5)
brain.add_label("BA45", color="firebrick", borders=True)
brain.add_label("BA45", color="salmon", borders=True, scalar_thresh=.5)
label_file = pjoin(subj_dir, "fsaverage", "label", "lh.BA6.label")
prob_field = np.zeros_like(brain._geo.x)
ids, probs = nib.freesurfer.read_label(label_file, read_scalars=True)
prob_field[ids] = probs
brain.add_data(prob_field, thresh=1e-5)
brain.data["colorbar"].number_of_colors = 10
brain.data["colorbar"].number_of_labels = 11
brain.close()
@requires_fsaverage
def test_text():
"""Test plotting of text
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
brain.add_text(0.1, 0.1, 'Hello', 'blah')
brain.close()
@requires_fsaverage
def test_animate():
"""Test animation
"""
mlab.options.backend = 'auto'
brain = Brain(*std_args, size=100)
brain.add_morphometry('curv')
tmp_name = mktemp() + '.avi'
brain.animate(["m"] * 3, n_steps=2)
brain.animate(['l', 'l'], n_steps=2, fname=tmp_name)
# can't rotate in axial plane
assert_raises(ValueError, brain.animate, ['l', 'd'])
brain.close()
@requires_fsaverage
def test_views():
"""Test showing different views
"""
mlab.options.backend = 'test'
brain = Brain(*std_args)
brain.show_view('lateral')
brain.show_view('m')
brain.show_view('rostral')
brain.show_view('caudal')
brain.show_view('ve')
brain.show_view('frontal')
brain.show_view('par')
brain.show_view('dor')
brain.show_view({'distance': 432})
brain.show_view({'azimuth': 135, 'elevation': 79}, roll=107)
brain.close()
|
diego0020/PySurfer
|
surfer/tests/test_viz.py
|
Python
|
bsd-3-clause
| 11,223
|
[
"Mayavi"
] |
c084a82534e50edd04043c60f3780458c89579cb4a87454b753c43a391bfdc62
|
from mcpi.minecraft import Minecraft
from mcmodels import Rocket, LaunchPad
from time import sleep
import math
def findPointOnCircle(cx, cy, radius, angle):
x = cx + math.sin(math.radians(angle)) * radius
y = cy + math.cos(math.radians(angle)) * radius
return((int(x + 0.5),int(y + 0.5)))
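
# --- Hedged worked example (not part of the original script): with the formula
# above, angle is measured from the positive y axis toward the positive x axis:
#   findPointOnCircle(0, 0, 10, 0)   -> sin(0)=0,  cos(0)=1  -> (0, 10)
#   findPointOnCircle(0, 0, 10, 90)  -> sin(90)=1, cos(90)=0 -> (10, 0)
#   findPointOnCircle(0, 0, 1, 60)   -> (int(0.866+0.5), int(0.5+0.5)) -> (1, 1)
# This is how the pitch-over loop below converts a pitch angle into a (z, y)
# step for the rocket.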
if __name__ == "__main__":
mc = Minecraft.create()
mc.postToChat("SpaceCRAFT - Minecraft Rocket Launch")
mc.postToChat("Hit the launch pad")
#create the rocket next to the player at ground level
rocketPos = mc.player.getTilePos()
rocketPos.x += 5
rocketPos.y = mc.getHeight(rocketPos.x, rocketPos.z) - 1
launchpad = LaunchPad(mc, rocketPos)
rocket = Rocket(mc, rocketPos)
try:
#wait till the launch tnt is hit
launch = False
while not launch:
for hit in mc.events.pollBlockHits():
shapeblockhit = launchpad.getShapeBlock(
hit.pos.x, hit.pos.y, hit.pos.z)
if shapeblockhit != None:
if shapeblockhit.tag == "launch":
launch = True
#count down to blast off
for count in range(3, 0, -1):
mc.postToChat(str(count))
sleep(1)
mc.postToChat("Blast Off")
#launch the rocket
for up in range(0, 15):
rocket.moveBy(0, 1, 0)
#pitch the rocket over
pitch = 0
for up in range(0, 75):
#find out where the rocket should be pointing for its pitch
z, y = findPointOnCircle(0, 0, 1, pitch)
#rotate the rocket
rocket.rotate(0, pitch, 0)
#move the rocket
rocket.moveBy(0, y, z)
#increase the angle of pitch until it gets to 60 degrees
if pitch < 60: pitch += 3
finally:
rocket.clear()
launchpad.clear()
|
astro-pi/SpaceCRAFT
|
spacecraft/mcrocketlaunch.py
|
Python
|
bsd-3-clause
| 1,969
|
[
"BLAST"
] |
2ac74a51f5afe864e513dbe0b07e7a960578e0421b301aaa0653bb060341b704
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('olc_webportalv2.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^newmultiprojects/', include('olc_webportalv2.new_multisample.urls', namespace='new_multisample')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
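
# --- Hedged note (not part of the original urls.py): with DEBUG enabled, the
# extra routes above let the error templates be previewed directly during
# development, e.g. (assuming the default `manage.py runserver` address):
#   http://127.0.0.1:8000/400/  -> default_views.bad_request
#   http://127.0.0.1:8000/403/  -> default_views.permission_denied
#   http://127.0.0.1:8000/404/  -> default_views.page_not_found
#   http://127.0.0.1:8000/500/  -> default_views.server_error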
|
forestdussault/olc_webportalv2
|
config/urls.py
|
Python
|
mit
| 1,664
|
[
"VisIt"
] |
ae51a17b621804e7f3d5e8c6fda5b67355baa3fabe3b44b1fb5cd9e1e41be548
|
# Steps specific to the day-view
from behave import step
from pages.day_view import DayViewPage
@step('we visit the view for day {day:Int}')
def step_impl(context, day):
page = DayViewPage(context)
page.visit(day)
|
abingham/accu-2017-elm-app
|
tests/features/steps/day_view.py
|
Python
|
mit
| 225
|
[
"VisIt"
] |
1b6824ff4ea5cddab994b6ca7223ac07c78d4220b5d01183d047630c77dbe54e
|
# -*- coding:utf-8 -*-
# ----------------------------------------------------------------------
# Copyright 2016 Juergen Probst
#
# This file is part of pyMPB.
#
# pyMPB is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyMPB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyMPB. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
from simulation import Simulation
from geometry import Geometry
from kspace import KSpaceTriangular, KSpace
from objects import Dielectric, Rod, Block
import defaults
import log
from utility import do_runmode, get_triangular_phc_waveguide_air_rods
from os import path, makedirs
import numpy as np
def TriHoles2D(
material, radius, numbands=8, k_interpolation=11,
resolution=32, mesh_size=7,
runmode='sim', num_processors=2,
save_field_patterns=True, convert_field_patterns=True,
containing_folder='./',
job_name_suffix='', bands_title_appendix='',
custom_k_space=None, modes=('te', 'tm')):
"""Create a 2D MPB Simulation of a triangular lattice of holes.
:param material:
can be a string (e.g. SiN, 4H-SiC-anisotropic_c_in_z; defined in
data.py) or just the epsilon value (float)
:param radius: the radius of holes in units of the lattice constant
:param numbands: number of bands to calculate
:param k_interpolation:
number of the k-vectors between every two of
the used high symmetry points Gamma, M, K and Gamma again, so the
total number of simulated k-vectors will be 3*k_interpolation + 4.
        Only used if no custom_k_space is provided.
:param resolution: described in MPB documentation
:param mesh_size: described in MPB documentation
:param runmode: can be one of the following:
* empty string : just create and return the simulation object
* 'ctl' : create the sim object and save the ctl file
* 'sim' (default): run the simulation and do all postprocessing
* 'postpc' : do all postprocessing; simulation should have run
before!
* 'display': display all pngs done during postprocessing. This is
the only mode that is interactive.
:param num_processors: number of processors used during simulation
:param save_field_patterns: indicates whether field pattern h5 files
are generated during the simulation (at points of high symmetry)
:param convert_field_patterns: indicates whether field pattern h5
files should be converted to png (only when postprocessing)
:param containing_folder: the path to the folder which will contain
the simulation subfolder.
:param job_name_suffix: Optionally specify a job_name_suffix
(appendix to the folder name etc.) which will be appended to the
jobname created automatically from the most important parameters.
:param bands_title_appendix: will be added to the title of the bands
diagram.
:param custom_k_space: By default, KSpaceTriangular with
        k_interpolation interpolation steps is used. Provide any KSpace
object here to customize this. k_interpolation will then be ignored.
:param modes: a list of modes to run. Possible are 'te' and 'tm'.
Default: both
:return: the Simulation object
"""
mat = Dielectric(material)
geom = Geometry(
width=1,
height=1,
triangular=True,
objects=[
Rod(
x=0,
y=0,
material='air',
radius=radius)])
if isinstance(custom_k_space, KSpace):
kspace = custom_k_space
else:
kspace = KSpaceTriangular(
k_interpolation=k_interpolation,
use_uniform_interpolation=defaults.newmpb)
# points of interest: (output mode patterns at these points)
if save_field_patterns:
poi = kspace.points()[0:-1]
else:
poi = []
runcode = ''
for mode in modes:
if mode == 'te':
outputfunc = ' '.join(defaults.output_funcs_te)
else:
outputfunc = ' '.join(defaults.output_funcs_tm)
runcode += (
'(run-%s %s)\n' % (
mode, defaults.default_band_func(poi, outputfunc)
) +
'(print-dos 0 1.2 121)\n\n')
jobname = 'TriHoles2D_{0}_r{1:03.0f}'.format(
mat.name, radius * 1000)
sim = Simulation(
jobname=jobname + job_name_suffix,
geometry=geom,
kspace=kspace,
numbands=numbands,
resolution=resolution,
mesh_size=mesh_size,
initcode=defaults.default_initcode +
'(set! default-material {0})'.format(str(mat)),
postcode='',
runcode=runcode,
work_in_subfolder=path.join(
containing_folder, jobname + job_name_suffix),
clear_subfolder=runmode.startswith('s') or runmode.startswith('c'))
draw_bands_title = ('2D hex. PhC; {0}, radius={1:0.3f}'.format(
mat.name, geom.objects[0].radius) +
bands_title_appendix)
return do_runmode(
sim, runmode, num_processors, draw_bands_title,
plot_crop_y=True, # automatic cropping
convert_field_patterns=convert_field_patterns,
field_pattern_plot_filetype=defaults.field_dist_filetype,
# don't add gamma point a second time (index 3):
field_pattern_plot_k_selection=None,
x_axis_hint=[defaults.default_x_axis_hint, kspace][kspace.has_labels()]
)
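
# --- Hedged usage sketch (not part of the original pyMPB module). One
# plausible call of TriHoles2D above; the parameter values are illustrative
# only. With runmode='' the function just builds and returns the Simulation
# object (see the runmode description in the docstring), so nothing is written
# to disk and MPB is not run.
def _example_triholes2d_usage():
    return TriHoles2D(
        material=12.0,    # epsilon given directly as a float, as the docstring allows
        radius=0.34,      # hole radius in units of the lattice constant
        numbands=8,
        runmode='',       # just create and return the Simulation object
        modes=('te',))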
def TriHolesSlab3D(
material, radius, thickness, numbands=8, k_interpolation=11,
resolution=32, mesh_size=7, supercell_z=6,
runmode='sim', num_processors=2,
save_field_patterns=True, convert_field_patterns=True,
containing_folder='./',
job_name_suffix='', bands_title_appendix='',
custom_k_space=None, modes=('zeven', 'zodd'),
substrate_material=None):
"""Create a 3D MPB Simulation of a slab with a triangular lattice of
holes.
:param material: can be a string (e.g. SiN,
4H-SiC-anisotropic_c_in_z; defined in data.py) or just the epsilon
value (float)
:param radius: the radius of holes in units of the lattice constant
:param thickness: slab thickness in units of the lattice constant
:param numbands: number of bands to calculate
:param k_interpolation: number of the k-vectors between every two of
the used high symmetry points Gamma, M, K and Gamma again, so the
total number of simulated k-vectors will be 3*k_interpolation + 4
:param resolution: described in MPB documentation
:param mesh_size: described in MPB documentation
:param supercell_z: the height of the supercell in units of the
lattice constant
:param runmode: can be one of the following:
* empty string : just create and return the simulation object
* 'ctl' : create the sim object and save the ctl file
* 'sim' (default): run the simulation and do all postprocessing
* 'postpc' : do all postprocessing; simulation should have run
before!
* 'display': display all pngs done during postprocessing. This is
the only mode that is interactive.
:param num_processors: number of processors used during simulation
:param save_field_patterns: indicates whether field pattern h5 files
are generated during the simulation (at points of high symmetry)
:param convert_field_patterns: indicates whether field pattern h5
files should be converted to png (only when postprocessing)
:param containing_folder: the path to the folder which will contain
the simulation subfolder.
:param job_name_suffix: Optionally specify a job_name_suffix
(appendix to the folder name etc.) which will be appended to the
jobname created automatically from the most important parameters.
:param bands_title_appendix: will be added to the title of the bands
diagram.
:param custom_k_space: By default, KSpaceTriangular with
        k_interpolation interpolation steps is used. Provide any KSpace
object here to customize this. k_interpolation will then be ignored.
:param modes: a list of modes to run. Possible are 'zeven', 'zodd'
or '' (latter meaning no distinction). Default: ['zeven', 'zodd']
:param substrate_material: the material of an optional substrate,
see param material. Holes will not be extended into the substrate.
Default: None, i.e. the substrate is air.
:return: the Simulation object
"""
mat = Dielectric(material)
geom = Geometry(
width=1,
height=1,
depth=supercell_z,
triangular=True,
objects=[
Block(
x=0, y=0, z=0,
material=mat,
#make it bigger than computational cell, just in case:
size=(2, 2, thickness)),
Rod(
x=0,
y=0,
material='air',
radius=radius)])
if substrate_material:
geom.add_substrate(
Dielectric(substrate_material),
start_at=-0.5 * thickness)
if isinstance(custom_k_space, KSpace):
kspace = custom_k_space
else:
kspace = KSpaceTriangular(
k_interpolation=k_interpolation,
use_uniform_interpolation=defaults.newmpb)
# points of interest: (output mode patterns at these points)
if save_field_patterns:
poi = kspace.points()[0:-1]
else:
poi = []
runcode = ''
for mode in modes:
if mode == '':
runcode += (
'(run %s)\n' % (
defaults.default_band_func(
poi, ' '.join(defaults.output_funcs_other))
) +
'(print-dos 0 1.2 121)\n\n')
else:
if mode == 'zeven':
outputfunc = ' '.join(defaults.output_funcs_te)
else:
outputfunc = ' '.join(defaults.output_funcs_tm)
runcode += (
'(run-%s %s)\n' % (
mode, defaults.default_band_func(poi, outputfunc)
) +
'(print-dos 0 1.2 121)\n\n')
jobname = 'TriHolesSlab_{0}_r{1:03.0f}_t{2:03.0f}'.format(
mat.name, radius * 1000, thickness * 1000)
sim = Simulation(
jobname=jobname + job_name_suffix,
geometry=geom,
kspace=kspace,
numbands=numbands,
resolution=resolution,
mesh_size=mesh_size,
initcode=defaults.default_initcode,
postcode='',
runcode=runcode,
work_in_subfolder=path.join(
containing_folder, jobname + job_name_suffix),
clear_subfolder=runmode.startswith('s') or runmode.startswith('c'))
draw_bands_title = ('Hex. PhC slab; '
'{0}, thickness={1:0.3f}, radius={2:0.3f}'.format(
mat.name,
geom.objects[0].size[2],
geom.objects[1].radius) +
bands_title_appendix)
return do_runmode(
sim, runmode, num_processors, draw_bands_title,
plot_crop_y=0.8 / geom.substrate_index,
convert_field_patterns=convert_field_patterns,
field_pattern_plot_filetype=defaults.field_dist_filetype,
field_pattern_plot_k_selection=None,
x_axis_hint=[defaults.default_x_axis_hint, kspace][kspace.has_labels()]
)
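
# --- Hedged usage sketch (not part of the original pyMPB module), analogous to
# the 2D case above; the parameter values are illustrative only.
def _example_triholesslab3d_usage():
    return TriHolesSlab3D(
        material='SiN',   # material name resolved via data.py, per the docstring
        radius=0.34,
        thickness=0.5,    # slab thickness in units of the lattice constant
        supercell_z=6,
        runmode='',       # just create and return the Simulation object
        modes=('zeven',))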
def TriHoles2D_Waveguide(
material, radius, mode='te', numbands=8, k_steps=17,
supercell_size=5, resolution=32, mesh_size=7,
ydirection=False,
first_row_longitudinal_shift=0,
first_row_transversal_shift=0,
first_row_radius=None,
second_row_longitudinal_shift=0,
second_row_transversal_shift=0,
second_row_radius=None,
runmode='sim', num_processors=2,
projected_bands_folder='../projected_bands_repo',
plot_complete_band_gap=False,
save_field_patterns_kvecs=list(), save_field_patterns_bandnums=list(),
convert_field_patterns=False,
job_name_suffix='', bands_title_appendix='',
plot_crop_y=False, field_pattern_plot_k_selection=()):
"""Create a 2D MPB Simulation of a triangular lattice of holes, with
a waveguide along the nearest neighbor direction, i.e. Gamma->K
direction.
The simulation is done with a rectangular super cell.
Before the waveguide simulation, additional simulations of the
unperturbed structure will be run for projected bands data, if these
    simulations were not run before.
:param material: can be a string (e.g. SiN,
4H-SiC-anisotropic_c_in_z; defined in data.py) or just the epsilon
value (float)
:param radius: the radius of holes in units of the lattice constant
:param mode: the mode to run. Possible are 'te' and 'tm'.
:param numbands: number of bands to calculate
:param k_steps: number of k steps along the waveguide direction
between 0 and 0.5 to simulate. This can also be a list of the
explicit k values (just scalar values for component along the
waveguide axis) to be simulated.
:param supercell_size: the length of the supercell perpendicular to
the waveguide, in units of sqrt(3) times the lattice constant. If it
        is not an odd number, one will be added.
:param resolution: described in MPB documentation
:param mesh_size: described in MPB documentation
:param ydirection: set this if the waveguide should point along y,
otherwise (default) it will point along x. Use the default if you
want to use yparity data.
:param first_row_longitudinal_shift: shifts the holes next to the
waveguide by this amount, parallel to the waveguide direction.
:param first_row_transversal_shift: shifts the holes next to the
waveguide by this amount, perpendicular to the waveguide direction.
:param first_row_radius: The radius of the holes next to the
waveguide. If None (default), use radius.
:param second_row_longitudinal_shift: shifts the holes in the second
row next to the waveguide by this amount, parallel to the waveguide
direction
:param second_row_transversal_shift: shifts the holes in the second
row next to the waveguide by this amount, perpendicular to the
waveguide direction
:param second_row_radius: The radius of the holes in the second row
next to the waveguide. If None (default), use radius.
:param runmode: can be one of the following:
* empty string : just create and return the simulation object
* 'ctl' : create the sim object and save the ctl file
* 'sim' (default): run the simulation and do all postprocessing
* 'postpc' : do all postprocessing; simulation should have run
before!
* 'display': display all pngs done during postprocessing. This is
the only mode that is interactive.
:param num_processors: number of processors used during simulation
:param projected_bands_folder: the path to the folder which will
contain the simulations of the unperturbed PhC, which is needed for
the projections perpendicular to the waveguide direction. If the
folder contains simulations run before, their data will be reused.
:param plot_complete_band_gap: If this is False, the band gap will be a
function of the k component along the waveguide. For each k,
a simulation with unperturbed photonic crystal will be run to get
the data. If this is True, only one unperturbed simulation will be
run to find the full direction independent bandgap.
:param save_field_patterns_kvecs: a list of k-vectors (3-tuples),
which indicates where field pattern h5 files are generated during
the simulation (only at bands in save_field_patterns_bandnums)
:param save_field_patterns_bandnums: a list of band numbers (int,
starting at 1), which indicates where field pattern h5 files are
generated during the simulation (only at k-vectors in
save_field_patterns_kvecs)
:param convert_field_patterns: indicates whether field pattern h5
files should be converted to png (only when postprocessing)
:param job_name_suffix: Optionally specify a job_name_suffix
(appendix to the folder name etc.) which will be appended to the
jobname created automatically from the most important parameters.
:param bands_title_appendix: will be added to the title of the bands
diagram.
    :param plot_crop_y:
        the band diagrams are automatically cropped before the last band
        if plot_crop_y is True; alternatively, set plot_crop_y to a number
        to specify the max. y-value where the plot will be cropped.
:return: the Simulation object
"""
mat = Dielectric(material)
# first, make sure all data for projected bands exist, otherwise
# start their simulations.
unperturbed_jobname = 'TriHoles2D_{0}_r{1:03.0f}'.format(
mat.name, radius * 1000)
# look here for old simulations, and place new ones there:
repo = path.abspath(
path.join(
path.curdir,
projected_bands_folder,
unperturbed_jobname
)
)
# create path if not there yet:
if not path.exists(path.abspath(repo)):
makedirs(path.abspath(repo))
# these k points will be simulated (along waveguide):
if isinstance(k_steps, (int, float)):
k_steps = int(k_steps)
k_points = np.linspace(0, 0.5, num=k_steps, endpoint=True)
else:
k_points = np.array(k_steps)
# This list will be forwarded later to this defect simulation's
# post-process. It contains the folder paths of unperturbed
# simulations for each k-vec of this simulation (or only one simulation,
# if the plotted band gap does not change from k-vec to k-vec):
project_bands_list = []
if plot_complete_band_gap:
if mode == 'te':
# We only need a simulation of the first two bands at the M
# and the K point to get the band gap.
# first, see if we need to simulate:
jobname_suffix = '_for_gap'
jobname = unperturbed_jobname + jobname_suffix
project_bands_list.append(path.join(repo, jobname))
range_file_name = path.join(
repo, jobname, jobname + '_' + mode + '_ranges.csv')
if not path.isfile(range_file_name):
# does not exist, so start simulation:
log.info('unperturbed structure not yet simulated for '
'band gap. Running now...')
kspace = KSpace(
points_list=[(0, 0.5, 0), ('(/ -3)', '(/ 3)', 0)],
k_interpolation=0,
point_labels=['M', 'K'])
sim = TriHoles2D(
material=material,
radius=radius,
custom_k_space=kspace,
numbands=3, # 3 so the band plot looks better ;)
resolution=resolution,
mesh_size=mesh_size,
runmode='sim' if runmode.startswith('s') else '',
num_processors=num_processors,
containing_folder=repo,
save_field_patterns=False,
convert_field_patterns=False,
job_name_suffix=jobname_suffix,
bands_title_appendix=', for band gap',
modes=[mode]
)
if not sim:
log.error(
'an error occurred during simulation of unperturbed '
'structure. See the .out file in {0}'.format(
path.join(
repo, jobname
))
)
return
# Now, the _ranges.csv file is wrong, because we did not
# simulate the full K-Space, especially Gamma is
# missing. Correct the ranges so the first band starts
# at 0 and the second band is the last band and goes to
# a very high value. This way, there is only the band
# gap left between the first and second continuum bands.
# Load the _ranges.csv file to get the band gap:
ranges = np.loadtxt(range_file_name, delimiter=',', ndmin=2)
# tinker:
ranges[0, 1] = 0
ranges[1, 2] = ranges[1, 2] * 100
# save file again, drop higher bands:
np.savetxt(
range_file_name,
ranges[:2, :],
header='bandnum, min, max',
fmt=['%.0f', '%.6f', '%.6f'],
delimiter=', ')
else:
# For high refractive indices and big radius, there are some small
# gaps for TM modes. But we need to simulate more bands and
# more k-points than for the TE modes.
# I don't need it, so it is not implemented yet:
log.warning('plot_complete_band_gap not implemented for {0}'
' modes yet.'.format(mode))
else:
# Note: in the following, I use a triangular lattice, which is
# orientated such that the Gamma->K direction points towards y
# in cartesian coordinates. If ydirection is False, it does not
# matter, because the projected bands stay the same.
# In the triangular lattice, in the basis of its reciprocal
        # basis vectors, this is the K' point, i.e. the boundary of the
        # first Brillouin zone in the rectangular lattice, onto which we
# need to project (see also : Steven G. Johnson et al., "Linear
# waveguides in photonic-crystal slabs", Phys. Rev. B, Vol. 62,
# Nr.12, 8212-8222 (2000); page 8216 & Fig. 8):
rectBZ_K = np.array((0.25, -0.25))
# the M point in the triangular lattice reciprocal basis, which
# points along +X (perpendicular to a waveguide in k_y
# direction): (note: if k_y is greater than 1/3, we leave the
# 1st BZ in +x direction. But this is OK and we calculate it
# anyway, because it does not change the projection. If we want
# to optimize calculation time some time, we could limit this.)
triBZ_M = np.array((0.5, 0.5))
# now, see if we need to simulate:
for ky in k_points:
jobname_suffix = '_projk{0:06.0f}'.format(ky*1e6)
jobname = unperturbed_jobname + jobname_suffix
project_bands_list.append(path.join(repo, jobname))
range_file_name = path.join(
repo, jobname, jobname + '_' + mode + '_ranges.csv')
if not path.isfile(range_file_name):
# does not exist, so start simulation:
log.info('unperturbed structure not yet simulated at '
'k_wg={0}. Running now...'.format(ky))
kspace = KSpace(
points_list=[
rectBZ_K * ky * 2,
rectBZ_K * ky * 2 + triBZ_M
],
k_interpolation=15,)
sim = TriHoles2D(
material=material,
radius=radius,
custom_k_space=kspace,
numbands=defaults.num_projected_bands,
resolution=resolution,
mesh_size=mesh_size,
runmode='sim' if runmode.startswith('s') else '',
num_processors=num_processors,
containing_folder=repo,
save_field_patterns=False,
convert_field_patterns=False,
job_name_suffix=jobname_suffix,
bands_title_appendix=', at k_wg={0:0.3f}'.format(ky),
modes=[mode]
)
if not sim:
log.error(
'an error occurred during simulation of unperturbed '
'structure. See the .out file in {0}'.format(
path.join(
repo, jobname
))
)
return
# If a shift is used, inversion symmetry is broken:
if ((first_row_longitudinal_shift or second_row_longitudinal_shift) and
'mpbi' in defaults.mpb_call):
log.info('default MPB to use includes inversion symmetry: '
'{0}. '.format(defaults.mpb_call) +
'Shift of holes specified, which breaks inv. symmetry. '
'Will fall back to MPB without inv. symm.: {0}'.format(
defaults.mpb_call.replace('mpbi', 'mpb')
))
defaults.mpb_call = defaults.mpb_call.replace('mpbi', 'mpb')
# make it odd:
if supercell_size % 2 == 0:
supercell_size += 1
# Create geometry and add objects.
objects = get_triangular_phc_waveguide_air_rods(
radius=radius,
supercell_size=supercell_size,
ydirection=ydirection,
first_row_longitudinal_shift=first_row_longitudinal_shift,
first_row_transversal_shift=first_row_transversal_shift,
first_row_radius=first_row_radius,
second_row_longitudinal_shift=second_row_longitudinal_shift,
second_row_transversal_shift=second_row_transversal_shift,
second_row_radius=second_row_radius)
if ydirection:
geom = Geometry(
width='(* (sqrt 3) %i)' % supercell_size,
height=1,
triangular=False,
objects=objects
)
kspaceW1 = KSpace(
points_list=[(0, ky, 0) for ky in k_points],
k_interpolation=0,
)
else:
geom = Geometry(
width=1,
height='(* (sqrt 3) %i)' % supercell_size,
triangular=False,
objects=objects
)
kspaceW1 = KSpace(
points_list=[(kx, 0, 0) for kx in k_points],
k_interpolation=0,
)
jobname = 'TriHoles2D_W1_{0}_r{1:03.0f}'.format(
mat.name, radius * 1000)
if mode == 'te':
outputfuncs = defaults.output_funcs_te
else:
outputfuncs = defaults.output_funcs_tm
runcode = ''
if defaults.newmpb:
runcode = '(optimize-grid-size!)\n\n'
if save_field_patterns_bandnums and save_field_patterns_kvecs:
runcode += (
';function to determine whether an item x is member of list:\n'
'(define (member? x list)\n'
' (cond (\n'
' ;false if the list is empty:\n'
' (null? list) #f )\n'
' ;true if first item (car) equals x:\n'
' ( (eqv? x (car list)) #t )\n'
' ;else, drop first item (cdr) and make recursive call:\n'
' ( else (member? x (cdr list)) )\n'
' ))\n\n' +
'(define output-bands-list (list {0}))\n\n'.format(' '.join(
map(str, save_field_patterns_bandnums))) +
'(define (output-func bnum)\n'
' (if (member? bnum output-bands-list)\n'
' (begin\n' +
''.join(12 * ' ' + '({0} bnum)\n'.format(func)
for func in outputfuncs) +
' )\n'
' ))\n\n'
'(run-{0} {1})\n'.format(
mode,
defaults.default_band_func(
save_field_patterns_kvecs, 'output-func')) +
'(print-dos 0 1.2 121)\n\n'
)
else:
runcode += ('(run-{0} {1})\n'.format(
mode,
defaults.default_band_func([], None)
) +
'(print-dos 0 1.2 121)\n\n')
sim = Simulation(
jobname=jobname + job_name_suffix,
geometry=geom,
kspace=kspaceW1,
numbands=numbands,
resolution=resolution,
mesh_size=mesh_size,
initcode=defaults.default_initcode +
'(set! default-material {0})'.format(str(mat)),
postcode='',
runcode=runcode,
clear_subfolder=runmode.startswith('s') or runmode.startswith('c'))
draw_bands_title = (
'2D hex. PhC W1; {0}, radius={1:0.3f}'.format(
mat.name, radius) +
bands_title_appendix)
return do_runmode(
sim, runmode, num_processors, draw_bands_title,
plot_crop_y=plot_crop_y,
convert_field_patterns=convert_field_patterns,
field_pattern_plot_k_selection=field_pattern_plot_k_selection,
field_pattern_plot_filetype=defaults.field_dist_filetype,
x_axis_hint=[5, "{1}" if ydirection else "{0}"],
project_bands_list=project_bands_list,
color_by_parity='y'
)
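# Minimal usage sketch: the helper below is hypothetical and its values are
# illustrative assumptions; it shows a plain TE W1 run of TriHoles2D_Waveguide
# using the complete (k-independent) band gap, as documented above.
def _example_TriHoles2D_Waveguide():
    """Return a TE W1 waveguide simulation built from assumed example values."""
    return TriHoles2D_Waveguide(
        material=12.0,               # epsilon value; a string like 'SiN' also works
        radius=0.3,
        mode='te',
        numbands=8,
        k_steps=17,                  # or an explicit list, e.g. [0.3, 0.4, 0.5]
        supercell_size=5,
        resolution=32,
        runmode='sim',
        num_processors=2,
        plot_complete_band_gap=True)  # one gap simulation instead of one per k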
def TriHolesSlab3D_Waveguide(
material, radius, thickness, mode='zeven', numbands=8, k_steps=17,
supercell_size=5, supercell_z=6,
resolution=32, mesh_size=7,
ydirection=False,
first_row_longitudinal_shift=0,
first_row_transversal_shift=0,
first_row_radius=None,
second_row_longitudinal_shift=0,
second_row_transversal_shift=0,
second_row_radius=None,
runmode='sim', num_processors=2,
projected_bands_folder='../projected_bands_repo',
plot_complete_band_gap=False,
save_field_patterns_kvecs=list(), save_field_patterns_bandnums=list(),
convert_field_patterns=False,
job_name_suffix='', bands_title_appendix='',
plot_crop_y=False, field_pattern_plot_k_selection=()):
"""Create a 3D MPB Simulation of a slab with a triangular lattice of
holes, with a waveguide along the nearest neighbor direction, i.e.
Gamma->K direction.
The simulation is done with a cubic super cell.
Before the waveguide simulation, additional simulations of the
unperturbed structure will be run for projected bands data, if these
    simulations were not run before.
:param material: can be a string (e.g. SiN,
4H-SiC-anisotropic_c_in_z; defined in data.py) or just the epsilon
value (float)
:param radius: the radius of holes in units of the lattice constant
:param thickness: slab thickness in units of the lattice constant
:param mode: the mode to run. Possible are 'zeven' and 'zodd'.
:param numbands: number of bands to calculate
:param k_steps: number of k steps along the waveguide direction
between 0 and 0.5 to simulate. This can also be a list of the
explicit k values (just scalar values for component along the
waveguide axis) to be simulated.
:param supercell_size: the length of the supercell perpendicular to the
waveguide, in units of sqrt(3) times the lattice constant. If it is
        not an odd number, one will be added.
:param supercell_z: the height of the supercell in units of the
lattice constant
:param resolution: described in MPB documentation
:param mesh_size: described in MPB documentation
:param ydirection: set this if the waveguide should point along y,
otherwise (default) it will point along x. Use the default if you
want to use yparity data.
:param first_row_longitudinal_shift: shifts the holes next to the
waveguide by this amount, parallel to the waveguide direction.
:param first_row_transversal_shift: shifts the holes next to the
waveguide by this amount, perpendicular to the waveguide direction.
:param first_row_radius: The radius of the holes next to the
waveguide. If None (default), use radius.
:param second_row_longitudinal_shift: shifts the holes in the second
row next to the waveguide by this amount, parallel to the waveguide
direction
:param second_row_transversal_shift: shifts the holes in the second
row next to the waveguide by this amount, perpendicular to the
waveguide direction
:param second_row_radius: The radius of the holes in the second row
next to the waveguide. If None (default), use radius.
:param runmode: can be one of the following:
* empty string : just create and return the simulation object
* 'ctl' : create the sim object and save the ctl file
* 'sim' (default): run the simulation and do all postprocessing
* 'postpc' : do all postprocessing; simulation should have run
before!
* 'display': display all pngs done during postprocessing. This is
the only mode that is interactive.
:param num_processors: number of processors used during simulation
:param projected_bands_folder: the path to the folder which will
contain the simulations of the unperturbed PhC, which is needed for
the projections perpendicular to the waveguide direction. If the
folder contains simulations run before, their data will be reused.
:param plot_complete_band_gap: If this is False, the band gap will be a
function of the k component along the waveguide. For each k,
a simulation with unperturbed photonic crystal will be run to get
the data. If this is True, only one unperturbed simulation will be
run to find the full direction independent bandgap.
:param save_field_patterns_kvecs: a list of k-vectors (3-tuples),
which indicates where field pattern h5 files are generated during
the simulation (only at bands in save_field_patterns_bandnums)
:param save_field_patterns_bandnums: a list of band numbers (int,
starting at 1), which indicates where field pattern h5 files are
generated during the simulation (only at k-vectors in
save_field_patterns_kvecs)
:param convert_field_patterns: indicates whether field pattern h5
files should be converted to png (only when postprocessing)
:param job_name_suffix: Optionally specify a job_name_suffix
(appendix to the folder name etc.) which will be appended to the
jobname created automatically from the most important parameters.
:param bands_title_appendix: will be added to the title of the bands
diagram.
    :param plot_crop_y:
        the band diagrams are automatically cropped before the last band
        if plot_crop_y is True; alternatively, set plot_crop_y to a number
        to specify the max. y-value where the plot will be cropped.
:return: the Simulation object
"""
mat = Dielectric(material)
# first, make sure all data for projected bands exist, otherwise
# start their simulations.
unperturbed_jobname = 'TriHolesSlab_{0}_r{1:03.0f}_t{2:03.0f}'.format(
mat.name, radius * 1000, thickness * 1000)
# look here for old simulations, and place new ones there:
repo = path.abspath(
path.join(
path.curdir,
projected_bands_folder,
unperturbed_jobname
)
)
# create path if not there yet:
if not path.exists(path.abspath(repo)):
makedirs(path.abspath(repo))
# these k points will be simulated (along waveguide):
if isinstance(k_steps, (int, float)):
k_steps = int(k_steps)
k_points = np.linspace(0, 0.5, num=k_steps, endpoint=True)
else:
k_points = np.array(k_steps)
# This list will be forwarded later to this defect simulation's
# post-process. It contains the folder paths of unperturbed
# simulations for each k-vec of this simulation (or only one simulation,
# if the plotted band gap does not change from k-vec to k-vec):
project_bands_list = []
if plot_complete_band_gap:
if mode == 'zeven':
# We only need a simulation of the first two bands at the M
# and the K point to get the band gap.
# first, see if we need to simulate:
jobname_suffix = '_for_gap'
jobname = unperturbed_jobname + jobname_suffix
project_bands_list.append(path.join(repo, jobname))
range_file_name = path.join(
repo, jobname, jobname + '_' + mode + '_ranges.csv')
if not path.isfile(range_file_name):
# does not exist, so start simulation:
log.info('unperturbed structure not yet simulated for '
'band gap. Running now...')
kspace = KSpace(
points_list=[(0, 0.5, 0), ('(/ -3)', '(/ 3)', 0)],
k_interpolation=0,
point_labels=['M', 'K'])
sim = TriHolesSlab3D(
material=material,
radius=radius,
thickness=thickness,
custom_k_space=kspace,
numbands=3, # 3 so the band plot looks better ;)
resolution=resolution,
mesh_size=mesh_size,
supercell_z=supercell_z,
runmode='sim' if runmode.startswith('s') else '',
num_processors=num_processors,
containing_folder=repo,
save_field_patterns=False,
convert_field_patterns=False,
job_name_suffix=jobname_suffix,
bands_title_appendix=', for band gap',
modes=[mode]
)
if not sim:
log.error(
'an error occurred during simulation of unperturbed '
'structure. See the .out file in {0}'.format(
path.join(
repo, jobname
))
)
return
# Now, the _ranges.csv file is wrong, because we did not
# simulate the full K-Space, especially Gamma is
# missing. Correct the ranges so the first band starts
# at 0 and the second band is the last band and goes to
# a very high value. This way, there is only the band
# gap left between the first and second continuum bands.
# Load the _ranges.csv file to get the band gap:
ranges = np.loadtxt(range_file_name, delimiter=',', ndmin=2)
# tinker:
ranges[0, 1] = 0
ranges[1, 2] = ranges[1, 2] * 100
# save file again, drop higher bands:
np.savetxt(
range_file_name,
ranges[:2, :],
header='bandnum, min, max',
fmt=['%.0f', '%.6f', '%.6f'],
delimiter=', ')
else:
# For high refractive indices and big radius, there are some
# small gaps for TM modes. But we need to simulate more
# bands and more k-points than for the TE modes. This is
# especially difficult (or even impossible?), since
# quasi-guided PhC bands (which narrow the band gap) are
# hidden by continuum modes above the light line in 3D.
# I don't need it, so it is not implemented yet:
log.warning('plot_complete_band_gap not implemented for {0}'
' modes yet.'.format(mode))
else:
# Note: in the following, I use a triangular lattice, which is
# orientated such that the Gamma->K direction points towards y
# in cartesian coordinates. If ydirection is False, it does not
# matter, because the projected bands stay the same.
# In the triangular lattice, in the basis of its reciprocal
        # basis vectors, this is the K' point, i.e. the boundary of the
        # first Brillouin zone in the rectangular lattice, onto which we
# need to project (see also : Steven G. Johnson et al., "Linear
# waveguides in photonic-crystal slabs", Phys. Rev. B, Vol. 62,
# Nr.12, 8212-8222 (2000); page 8216 & Fig. 8):
rectBZ_K = np.array((0.25, -0.25))
# the M point in the triangular lattice reciprocal basis, which
# points along +X (perpendicular to a waveguide in k_y
# direction): (note: if k_y is greater than 1/3, we leave the
# 1st BZ in +x direction. But this is OK and we calculate it
# anyway, because it does not change the projection. If we want
# to optimize calculation time some time, we could limit this.)
triBZ_M = np.array((0.5, 0.5))
# now, see if we need to simulate:
for ky in k_points:
jobname_suffix = '_projk{0:06.0f}'.format(ky*1e6)
jobname = unperturbed_jobname + jobname_suffix
project_bands_list.append(path.join(repo, jobname))
range_file_name = path.join(
repo, jobname, jobname + '_' + mode + '_ranges.csv')
if not path.isfile(range_file_name):
# does not exist, so start simulation:
log.info('unperturbed structure not yet simulated at '
'k_wg={0}. Running now...'.format(ky))
kspace = KSpace(
points_list=[
rectBZ_K * ky * 2,
rectBZ_K * ky * 2 + triBZ_M
],
k_interpolation=15,)
sim = TriHolesSlab3D(
material=material,
radius=radius,
thickness=thickness,
custom_k_space=kspace,
numbands=defaults.num_projected_bands,
resolution=resolution,
supercell_z=supercell_z,
mesh_size=mesh_size,
runmode='sim' if runmode.startswith('s') else '',
num_processors=num_processors,
containing_folder=repo,
save_field_patterns=False,
convert_field_patterns=False,
job_name_suffix=jobname_suffix,
bands_title_appendix=', at k_wg={0:0.3f}'.format(ky),
modes=[mode]
)
if not sim:
log.error(
'an error occurred during simulation of unperturbed '
'structure. See the .out file in {0}'.format(
path.join(
repo, jobname
))
)
return
# If a shift is used, inversion symmetry is broken:
if ((first_row_longitudinal_shift or second_row_longitudinal_shift) and
'mpbi' in defaults.mpb_call):
log.info('default MPB to use includes inversion symmetry: '
'{0}. '.format(defaults.mpb_call) +
'Shift of holes specified, which breaks inv. symmetry. '
'Will fall back to MPB without inv. symm.: {0}'.format(
defaults.mpb_call.replace('mpbi', 'mpb')
))
defaults.mpb_call = defaults.mpb_call.replace('mpbi', 'mpb')
# make it odd:
if supercell_size % 2 == 0:
supercell_size += 1
# Create geometry and add objects.
objects = get_triangular_phc_waveguide_air_rods(
radius=radius,
supercell_size=supercell_size,
ydirection=ydirection,
first_row_longitudinal_shift=first_row_longitudinal_shift,
first_row_transversal_shift=first_row_transversal_shift,
first_row_radius=first_row_radius,
second_row_longitudinal_shift=second_row_longitudinal_shift,
second_row_transversal_shift=second_row_transversal_shift,
second_row_radius=second_row_radius)
if ydirection:
geom = Geometry(
width='(* (sqrt 3) %i)' % supercell_size,
height=1,
depth=supercell_z,
triangular=False,
objects=(
[Block(
x=0, y=0, z=0,
material=mat,
# make it bigger than computational cell, just in case:
size=(
'(* (sqrt 3) %i)' % (supercell_size + 1),
2,
thickness))
] +
objects
)
)
kspaceW1 = KSpace(
points_list=[(0, ky, 0) for ky in k_points],
k_interpolation=0,
)
else:
geom = Geometry(
width=1,
height='(* (sqrt 3) %i)' % supercell_size,
depth=supercell_z,
triangular=False,
objects=(
[Block(
x=0, y=0, z=0,
material=mat,
# make it bigger than computational cell, just in case:
size=(
2,
'(* (sqrt 3) %i)' % (supercell_size + 1),
thickness))
] +
objects
)
)
kspaceW1 = KSpace(
points_list=[(kx, 0, 0) for kx in k_points],
k_interpolation=0,
)
jobname = 'TriHolesSlab_W1_{0}_r{1:03.0f}_t{2:03.0f}'.format(
mat.name, radius * 1000, thickness * 1000)
if mode == 'zeven':
outputfuncs = defaults.output_funcs_te
else:
outputfuncs = defaults.output_funcs_tm
runcode = ''
if defaults.newmpb:
runcode = '(optimize-grid-size!)\n\n'
if save_field_patterns_bandnums and save_field_patterns_kvecs:
runcode += (
';function to determine whether an item x is member of list:\n'
'(define (member? x list)\n'
' (cond (\n'
' ;false if the list is empty:\n'
' (null? list) #f )\n'
' ;true if first item (car) equals x:\n'
' ( (eqv? x (car list)) #t )\n'
' ;else, drop first item (cdr) and make recursive call:\n'
' ( else (member? x (cdr list)) )\n'
' ))\n\n' +
'(define output-bands-list (list {0}))\n\n'.format(' '.join(
map(str, save_field_patterns_bandnums))) +
'(define (output-func bnum)\n'
' (if (member? bnum output-bands-list)\n'
' (begin\n' +
''.join(12 * ' ' + '({0} bnum)\n'.format(func)
for func in outputfuncs) +
' )\n'
' ))\n\n'
'(run-{0} {1})\n'.format(
mode,
defaults.default_band_func(
save_field_patterns_kvecs, 'output-func')) +
'(print-dos 0 1.2 121)\n\n'
)
else:
runcode += ('(run-{0} {1})\n'.format(
mode,
defaults.default_band_func([], None)
) +
'(print-dos 0 1.2 121)\n\n')
sim = Simulation(
jobname=jobname + job_name_suffix,
geometry=geom,
kspace=kspaceW1,
numbands=numbands,
resolution=resolution,
mesh_size=mesh_size,
initcode=defaults.default_initcode,
postcode='',
runcode=runcode,
clear_subfolder=runmode.startswith('s') or runmode.startswith('c'))
draw_bands_title = (
'Hex. PhC slab W1; {0}, thickness={1:0.3f}, radius={2:0.3f}'.format(
mat.name,
geom.objects[0].size[2],
radius) +
bands_title_appendix)
return do_runmode(
sim, runmode, num_processors, draw_bands_title,
plot_crop_y=plot_crop_y,
convert_field_patterns=convert_field_patterns,
field_pattern_plot_k_selection=field_pattern_plot_k_selection,
field_pattern_plot_filetype=defaults.field_dist_filetype,
x_axis_hint=[5, "{1}" if ydirection else "{0}"],
project_bands_list=project_bands_list,
color_by_parity='y'
)
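# Minimal usage sketch: the helper below is hypothetical and its values are
# illustrative assumptions; it shows a z-even slab W1 run with a transversally
# shifted first hole row. Note that only longitudinal shifts make the code
# above fall back from mpbi to plain mpb (broken inversion symmetry).
def _example_TriHolesSlab3D_Waveguide():
    """Return a slab W1 simulation built from assumed example values."""
    return TriHolesSlab3D_Waveguide(
        material='SiN',
        radius=0.34,
        thickness=0.5,
        mode='zeven',
        numbands=8,
        k_steps=17,
        supercell_size=5,
        supercell_z=6,
        first_row_transversal_shift=0.1,
        runmode='sim',
        num_processors=2)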
def TriHoles2D_Waveguide_effective_epsilon_frequency_dependent(
epsilon_cubspline_knots, epsilon_cubspline_coeffs,
band_number, init_frequency,
radius, mode='te', k_steps=17,
supercell_size=5, resolution=32, mesh_size=7,
ydirection=False, ensure_y_parity='no',
first_row_longitudinal_shift=0,
first_row_transversal_shift=0,
first_row_radius=None,
second_row_longitudinal_shift=0,
second_row_transversal_shift=0,
second_row_radius=None,
runmode='sim', num_processors=2,
save_field_patterns_kvecs=list(),
convert_field_patterns=False,
containing_folder='./',
job_name_suffix='', bands_title_appendix='',
plot_crop_y=False, extra_bands=0, gap=None,
field_pattern_plot_k_selection=None):
"""Create a 2D MPB Simulation of a triangular lattice of holes, with
a waveguide along the nearest neighbor direction, i.e. Gamma->K
direction.
The background material epsilon will be dependent on frequency. For this
to work with MPB, for each k-vec a number of simulations must be run
until the frequency of a single band of interest (band_number) and the
frequency used for the material converge to a common value.
The simulation is done with a rectangular super cell.
:param epsilon_cubspline_knots:
An array of frequencies, separating a frequency interval into
segments. In each segment, the material epsilon is defined by
a cubic polynomial. Outside the interval spanned by these
frequencies, epsilon will be extrapolated by the polynomials in
the outermost segments. If the epsilon function was fitted with
a ``scipy.interpolate.CubicSpline``, this is its
``CubicSpline.x`` attribute.
:param epsilon_cubspline_coeffs:
A matrix of floats with shape (4, n-1), with `n` the length of
epsilon_cubspline_knots; ``epsilon_cubspline_coeffs[k, i]`` is
the coefficient for the polynomial ``(x-x[i])**(3-k)`` on
the segment between ``epsilon_cubspline_knots[i]`` and
``epsilon_cubspline_knots[i+1]``. If the epsilon function was
fitted with a ``scipy.interpolate.CubicSpline``, this is its
``CubicSpline.c`` attribute.
:param band_number: The simulation can only be run for a single
band. Choose it here. The band with the lowest frequency is
``band_number=1``.
:param init_frequency: A crude initial guess for the frequency
:param radius:
the radius of holes in units of the lattice constant
:param mode:
the mode to run. Possible are 'te' and 'tm'.
:param k_steps: number of k steps along the waveguide direction
between 0 and 0.5 to simulate. This can also be a list of the
explicit k values (just scalar values for component along the
waveguide axis) to be simulated.
:param supercell_size: the length of the supercell perpendicular to
the waveguide, in units of sqrt(3) times the lattice constant.
        If it is not an odd number, one will be added.
:param resolution: described in MPB documentation
:param mesh_size: described in MPB documentation
:param ydirection: set this if the waveguide should point along y,
otherwise (default) it will point along x. Use the default if
you want to use yparity data.
:param ensure_y_parity: (default: 'no')
This can be either 'even' or 'odd', in which case the parity
of *band_number* is checked in an additional quick simulation
run at *init_frequency* before the real simulation starts.
If the parity does not match the desired parity, *band_number*
is increased until it matches. This is done separately for each
k-vector, and starts each time at the originally given
*band_number* again. If this feature is used, *extra_bands* is
automatically increased by 2.
:param first_row_longitudinal_shift: shifts the holes next to the
waveguide by this amount, parallel to the waveguide direction.
:param first_row_transversal_shift: shifts the holes next to the
waveguide by this amount, perpendicular to the waveguide
direction.
:param first_row_radius: The radius of the holes next to the
waveguide. If None (default), use radius.
:param second_row_longitudinal_shift: shifts the holes in the second
row next to the waveguide by this amount, parallel to the
waveguide direction
:param second_row_transversal_shift: shifts the holes in the second
row next to the waveguide by this amount, perpendicular to the
waveguide direction
:param second_row_radius: The radius of the holes in the second row
next to the waveguide. If None (default), use radius.
:param runmode: can be one of the following:
* empty string : just create and return the simulation object
* 'ctl' : create the sim object and save the ctl file
* 'sim' (default): run the simulation and do all postprocessing
* 'postpc' : do all postprocessing; simulation should have run
before!
* 'display': display all pngs done during postprocessing. This is
the only mode that is interactive.
:param num_processors: number of processors used during simulation
:param save_field_patterns_kvecs: a list of k-vectors (3-tuples),
which indicates where field pattern h5 files are generated during
the simulation
:param convert_field_patterns: indicates whether field pattern h5
files should be converted to png (only when postprocessing)
:param containing_folder: the path to the folder which will contain
the simulation subfolder.
:param job_name_suffix: Optionally specify a job_name_suffix
(appendix to the folder name etc.) which will be appended to the
jobname created automatically from the most important parameters.
:param bands_title_appendix: will be added to the title of the bands
diagram.
:param plot_crop_y:
Optionally define a min. and max. frequency value (in a 2-tuple)
where the band diagram will be cropped.
:param extra_bands:
number of extra bands to calculate above band_number. Their
frequencies will be faulty since they were calculated with the
wrong effective epsilon, but perhaps you need them for
reference.
:param gap:
Optional tuple of the lower and upper band gap frequencies,
if you want to add the gap to the band diagram (default: None).
:return: the Simulation object
"""
# these k points will be simulated (along waveguide):
if isinstance(k_steps, (int, float)):
k_steps = int(k_steps)
k_points = np.linspace(0, 0.5, num=k_steps, endpoint=True)
else:
k_points = np.array(k_steps)
# If a longitudinal shift is used, inversion symmetry is broken:
if ((first_row_longitudinal_shift or second_row_longitudinal_shift) and
'mpbi' in defaults.mpb_call):
log.info('default MPB to use includes inversion symmetry: '
'{0}. '.format(defaults.mpb_call) +
'Shift of holes specified, which breaks inv. symmetry. '
'Will fall back to MPB without inv. symm.: {0}'.format(
defaults.mpb_call.replace('mpbi', 'mpb')
))
defaults.mpb_call = defaults.mpb_call.replace('mpbi', 'mpb')
# make it odd:
if supercell_size % 2 == 0:
supercell_size += 1
# Create geometry and add objects.
objects = get_triangular_phc_waveguide_air_rods(
radius=radius,
supercell_size=supercell_size,
ydirection=ydirection,
first_row_longitudinal_shift=first_row_longitudinal_shift,
first_row_transversal_shift=first_row_transversal_shift,
first_row_radius=first_row_radius,
second_row_longitudinal_shift=second_row_longitudinal_shift,
second_row_transversal_shift=second_row_transversal_shift,
second_row_radius=second_row_radius)
if ydirection:
geom = Geometry(
width='(* (sqrt 3) %i)' % supercell_size,
height=1,
triangular=False,
objects=objects
)
kspaceW1 = KSpace(
points_list=[(0, ky, 0) for ky in k_points],
k_interpolation=0,
)
else:
geom = Geometry(
width=1,
height='(* (sqrt 3) %i)' % supercell_size,
triangular=False,
objects=objects
)
kspaceW1 = KSpace(
points_list=[(kx, 0, 0) for kx in k_points],
k_interpolation=0,
)
jobname = (
'TriHoles2D_W1_effeps_band{0:02.0f}{1}_r{2:03.0f}_res{3:03.0f}'.format(
band_number,
ensure_y_parity if ensure_y_parity in ['even', 'odd'] else '',
radius * 1000, resolution))
initcode = '\n'.join([
defaults.default_initcode,
'; initial guess for frequency:',
'(define init-freq {0:.3f})\n'.format(init_frequency),
'; the proper epsilon will be applied to the frequency of this band:',
'(define bandnum {0:.0f})'.format(band_number)])
runcode = ''
if defaults.newmpb:
runcode = '(optimize-grid-size!)\n\n'
epsknots = ''.join(
'\n ' + ' '.join(
str(x) for x in epsilon_cubspline_knots[i:i + 4]
)
for i in range(0, len(epsilon_cubspline_knots), 4)
)
epscoeffs = ''.join(
'\n (' + ''.join(
'\n ' + ' '.join(
str(x) for x in epsilon_cubspline_coeffs[j, i:i + 4]
)
for i in range(0, len(epsilon_cubspline_coeffs[j]), 4)
) + '\n )'
for j in range(len(epsilon_cubspline_coeffs))
)
if mode == 'te':
outputfuncs = defaults.output_funcs_te
else:
outputfuncs = defaults.output_funcs_tm
bandfuncs = ("\n" + 20 * " ").join(
map(str.strip,
defaults.default_band_func(
save_field_patterns_kvecs, ' '.join(outputfuncs)
).strip().split('\n')))
rundict = {
'epsknots': epsknots,
'epscoeffs': epscoeffs,
'mode_lower': mode.lower(),
'mode_upper': mode.upper(),
'bandfuncs': bandfuncs}
runcode += defaults.template_epsilon_function % rundict
if ensure_y_parity in ['even', 'odd']:
extra_bands += 2
runcode += (
'\n'
'(define bandnum-bak bandnum)\n'
'(define (get-y-%s-bandnum initial-b eps kvec)\n'
' (let ( (res resolution)\n'
' (pars \'()) )\n'
' ; run at lower resolution\n'
' (set! resolution (/ resolution 2))\n'
' (print "sim-info: running sim to check y-parity")\n'
' (simulate-at-eps eps kvec bandnum %s true)\n'
' (set! resolution res)\n'
' (set! pars (compute-yparities))\n'
' (do ( (bi (- initial-b 1) (+ bi 1)) )\n'
' ( (%s (list-ref pars bi)) (+ bi 1)))\n'
'))\n\n'% (
ensure_y_parity, mode.upper(),
['> -0.5', '< 0.5'][['odd', 'even'].index(ensure_y_parity)])
)
preparation = (
'(set! bandnum '
'(get-y-%s-bandnum '
'bandnum-bak (epsfunc init-freq) kvec))' % ensure_y_parity)
else:
preparation = ''
rundict['preparation'] = preparation
runcode += defaults.template_runcode_freq_dependent_epsilon % rundict
if "result" not in defaults.grep_datanames:
defaults.grep_datanames.append("result")
sim = Simulation(
jobname=jobname + job_name_suffix,
geometry=geom,
kspace=kspaceW1,
numbands=band_number + extra_bands,
resolution=resolution,
mesh_size=mesh_size,
initcode=initcode,
postcode='',
runcode=runcode,
work_in_subfolder=path.join(
containing_folder, jobname + job_name_suffix),
clear_subfolder=runmode.startswith('s') or runmode.startswith('c'))
draw_bands_title = (
'Hex. PhC W1; band {0:02.0f}, radius={1:0.3f}'.format(
band_number, radius) +
bands_title_appendix)
return do_runmode(
sim, runmode, num_processors, draw_bands_title,
plot_crop_y=plot_crop_y,
convert_field_patterns=convert_field_patterns,
field_pattern_plot_k_selection=field_pattern_plot_k_selection,
field_pattern_plot_filetype=defaults.field_dist_filetype,
x_axis_hint=[5, "{1}" if ydirection else "{0}"],
project_bands_list=gap,
color_by_parity='y'
)
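# Minimal usage sketch: the helper below is hypothetical and its frequency and
# epsilon samples are assumed values; it only illustrates the docstring's hint
# that the spline arguments can be taken from a scipy.interpolate.CubicSpline
# via its ``x`` and ``c`` attributes.
def _example_effective_epsilon_frequency_dependent():
    """Fit an assumed epsilon(frequency) spline and sketch the call."""
    from scipy.interpolate import CubicSpline
    freqs = [0.25, 0.30, 0.35, 0.40]       # assumed sample frequencies
    epsilons = [11.9, 11.7, 11.4, 11.0]    # assumed effective epsilon values
    cs = CubicSpline(freqs, epsilons)
    return TriHoles2D_Waveguide_effective_epsilon_frequency_dependent(
        epsilon_cubspline_knots=cs.x,
        epsilon_cubspline_coeffs=cs.c,
        band_number=1,
        init_frequency=0.3,
        radius=0.3,
        mode='te',
        runmode='sim',
        num_processors=2)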
def TriHoles2D_Waveguide_effective_epsilon_k_dependent(
epsilon_cubspline_knots, epsilon_cubspline_coeffs,
band_number, radius, mode='te', k_steps=17,
supercell_size=5, resolution=32, mesh_size=7,
ydirection=False, ensure_y_parity='no',
first_row_longitudinal_shift=0,
first_row_transversal_shift=0,
first_row_radius=None,
second_row_longitudinal_shift=0,
second_row_transversal_shift=0,
second_row_radius=None,
runmode='sim', num_processors=2,
save_field_patterns_kvecs=list(),
convert_field_patterns=False,
containing_folder='./',
job_name_suffix='', bands_title_appendix='',
plot_crop_y=False, extra_bands=0, gap=None,
field_pattern_plot_k_selection=None):
"""Create a 2D MPB Simulation of a triangular lattice of holes, with
a waveguide along the nearest neighbor direction, i.e. Gamma->K
direction.
The background material epsilon will be dependent on k in waveguide
direction.
The simulation is done with a rectangular super cell.
:param epsilon_cubspline_knots:
An array of scalar k-values, separating a range of k values into
segments. In each segment, the material epsilon is defined by
a cubic polynomial. Outside the interval spanned by these
k values, epsilon will be extrapolated by the polynomials in
the outermost segments. If the epsilon function was fitted with
a ``scipy.interpolate.CubicSpline``, this is its
``CubicSpline.x`` attribute.
:param epsilon_cubspline_coeffs:
A matrix of floats with shape (4, n-1), with `n` the length of
epsilon_cubspline_knots; ``epsilon_cubspline_coeffs[k, i]`` is
the coefficient for the polynomial ``(x-x[i])**(3-k)`` on
the segment between ``epsilon_cubspline_knots[i]`` and
``epsilon_cubspline_knots[i+1]``. If the epsilon function was
fitted with a ``scipy.interpolate.CubicSpline``, this is its
``CubicSpline.c`` attribute.
:param band_number: The effective epsilon function is usually
only valid for a single band. Choose it here. The band with
the lowest frequency is ``band_number=1``.
:param radius:
the radius of holes in units of the lattice constant
:param mode:
the mode to run. Possible are 'te' and 'tm'.
:param k_steps: number of k steps along the waveguide direction
between 0 and 0.5 to simulate. This can also be a list of the
explicit k values (just scalar values for component along the
waveguide axis) to be simulated.
:param supercell_size: the length of the supercell perpendicular to
the waveguide, in units of sqrt(3) times the lattice constant.
        If it is not an odd number, one will be added.
:param resolution: described in MPB documentation
:param mesh_size: described in MPB documentation
:param ydirection: set this if the waveguide should point along y,
otherwise (default) it will point along x. Use the default if
you want to use yparity data.
:param ensure_y_parity: (default: 'no')
This can be either 'even' or 'odd', in which case the parities
of the simulated bands are checked to find the right band to return
        in the results. If field patterns are exported, they will only be
exported at these bands. For 'even', the first y-even band starting
from *band_number* is selected, for 'odd' a little more
sophisticated algorithm utilizing parities and group velocities
is used to find the characteristic y-odd waveguide band.
If this feature is used, *extra_bands* is automatically increased
by 2, but this is not enough if 'odd' is used, where *extra_bands*
should be manually set to more than 10 or so.
:param first_row_longitudinal_shift: shifts the holes next to the
waveguide by this amount, parallel to the waveguide direction.
:param first_row_transversal_shift: shifts the holes next to the
waveguide by this amount, perpendicular to the waveguide
direction.
:param first_row_radius: The radius of the holes next to the
waveguide. If None (default), use radius.
:param second_row_longitudinal_shift: shifts the holes in the second
row next to the waveguide by this amount, parallel to the
waveguide direction
:param second_row_transversal_shift: shifts the holes in the second
row next to the waveguide by this amount, perpendicular to the
waveguide direction
:param second_row_radius: The radius of the holes in the second row
next to the waveguide. If None (default), use radius.
:param runmode: can be one of the following:
* empty string : just create and return the simulation object
* 'ctl' : create the sim object and save the ctl file
* 'sim' (default): run the simulation and do all postprocessing
* 'postpc' : do all postprocessing; simulation should have run
before!
* 'display': display all pngs done during postprocessing. This is
the only mode that is interactive.
:param num_processors: number of processors used during simulation
:param save_field_patterns_kvecs: a list of k-vectors (3-tuples),
which indicates where field pattern h5 files are generated during
the simulation
:param convert_field_patterns: indicates whether field pattern h5
files should be converted to png (only when postprocessing)
:param containing_folder: the path to the folder which will contain
the simulation subfolder.
:param job_name_suffix: Optionally specify a job_name_suffix
(appendix to the folder name etc.) which will be appended to the
jobname created automatically from the most important parameters.
:param bands_title_appendix: will be added to the title of the bands
diagram.
:param plot_crop_y:
Optionally define a min. and max. frequency value (in a 2-tuple)
where the band diagram will be cropped.
:param extra_bands:
number of extra bands to calculate above band_number. Their
frequencies will be faulty since they were calculated with the
wrong effective epsilon, but perhaps you need them for
reference.
:param gap:
Optional tuple of the lower and upper band gap frequencies,
if you want to add the gap to the band diagram (default: None).
:return: the Simulation object
"""
# these k points will be simulated (along waveguide):
if isinstance(k_steps, (int, float)):
k_steps = int(k_steps)
k_points = np.linspace(0, 0.5, num=k_steps, endpoint=True)
else:
k_points = np.array(k_steps)
# If a longitudinal shift is used, inversion symmetry is broken:
if ((first_row_longitudinal_shift or second_row_longitudinal_shift) and
'mpbi' in defaults.mpb_call):
log.info('default MPB to use includes inversion symmetry: '
'{0}. '.format(defaults.mpb_call) +
'Shift of holes specified, which breaks inv. symmetry. '
'Will fall back to MPB without inv. symm.: {0}'.format(
defaults.mpb_call.replace('mpbi', 'mpb')
))
defaults.mpb_call = defaults.mpb_call.replace('mpbi', 'mpb')
# make it odd:
if supercell_size % 2 == 0:
supercell_size += 1
# Create geometry and add objects.
objects = get_triangular_phc_waveguide_air_rods(
radius=radius,
supercell_size=supercell_size,
ydirection=ydirection,
first_row_longitudinal_shift=first_row_longitudinal_shift,
first_row_transversal_shift=first_row_transversal_shift,
first_row_radius=first_row_radius,
second_row_longitudinal_shift=second_row_longitudinal_shift,
second_row_transversal_shift=second_row_transversal_shift,
second_row_radius=second_row_radius)
if ydirection:
geom = Geometry(
width='(* (sqrt 3) %i)' % supercell_size,
height=1,
triangular=False,
objects=objects
)
kspaceW1 = KSpace(
points_list=[(0, ky, 0) for ky in k_points],
k_interpolation=0,
)
else:
geom = Geometry(
width=1,
height='(* (sqrt 3) %i)' % supercell_size,
triangular=False,
objects=objects
)
kspaceW1 = KSpace(
points_list=[(kx, 0, 0) for kx in k_points],
k_interpolation=0,
)
jobname = (
'TriHoles2D_W1_effeps_kdep_band'
'{0:02.0f}{1}_r{2:03.0f}_res{3:03.0f}'.format(
band_number,
ensure_y_parity if ensure_y_parity in ['even', 'odd'] else '',
radius * 1000, resolution))
initcode = '\n'.join([
defaults.default_initcode,
'; the given epsilon is intended to be applied to this band:',
'(define bandnum {0:.0f})'.format(band_number)])
runcode = ''
if defaults.newmpb:
runcode = '(optimize-grid-size!)\n\n'
epsknots = ''.join(
'\n ' + ' '.join(
str(x) for x in epsilon_cubspline_knots[i:i + 4]
)
for i in range(0, len(epsilon_cubspline_knots), 4)
)
epscoeffs = ''.join(
'\n (' + ''.join(
'\n ' + ' '.join(
str(x) for x in epsilon_cubspline_coeffs[j, i:i + 4]
)
for i in range(0, len(epsilon_cubspline_coeffs[j]), 4)
) + '\n )'
for j in range(len(epsilon_cubspline_coeffs))
)
if mode == 'te':
outputfuncs = defaults.output_funcs_te
else:
outputfuncs = defaults.output_funcs_tm
bandfuncs = ("\n" + 20 * " ").join(
map(str.strip,
defaults.default_band_func(
save_field_patterns_kvecs, ' '.join(outputfuncs)
).strip().split('\n')))
rundict = {
'epsknots': epsknots,
'epscoeffs': epscoeffs,
'mode_lower': mode.lower(),
'mode_upper': mode.upper(),
'bandfuncs': bandfuncs}
runcode += defaults.template_epsilon_function % rundict
if ensure_y_parity == 'even':
extra_bands += 2
runcode += (
'\n'
'(define (get-bandnum-for-y-%s-parity init-bandnum)\n'
' (let ( (pars (compute-yparities))\n'
' )\n'
' (do ( (bi (- init-bandnum 1) (+ bi 1)) )\n'
' ( (< 0.5 (list-ref pars bi)) (+ bi 1)))\n'
'))\n\n' % ensure_y_parity
)
preparation = ''
bandnumfunc = 'get-bandnum-for-y-%s-parity' % ensure_y_parity
elif ensure_y_parity == 'odd':
extra_bands += 2
# Special handling of the y-odd wg mode in holey hexagonal photonic
        # crystal. Beginning at small k up to k more than 1/2 pi/a, the mode
# has a nearly constant (negative) group velocity in waveguide
# direction. At small k, it extends above the band gap, crossing
# (actually also anti-crossing) other y-odd modes which extend into
# the bulk photonic crystal. If we just take the first y-odd mode
# we find above init-bandnum (like we are doing with y-even modes),
# we'll get one of those bulk modes at low k.
# We can utilize the proper waveguide mode's high (negative) group
# velocity, which makes it unique among the other modes (with
# positive velocities), to find it. Unfortunately, since it anti-
# crosses with the bulk y-odd modes, its frequencies are not exact
# (and it even depends on the supercell size which influences the
# number of bulk modes), but it is the best we can do:
runcode += defaults.template_y_odd_bandnum
preparation = ''
bandnumfunc = 'get-bandnum-for-y-%s-parity' % ensure_y_parity
else:
runcode += (
'\n'
'(define (get-bandnum-ignoring-parity init-bandnum)\n'
' init-bandnum)\n\n'
)
preparation = ''
bandnumfunc = 'get-bandnum-ignoring-parity'
rundict['preparation'] = preparation
rundict['bandnumfunc'] = bandnumfunc
runcode += defaults.template_runcode_k_dependent_epsilon % rundict
if "result" not in defaults.grep_datanames:
defaults.grep_datanames.append("result")
sim = Simulation(
jobname=jobname + job_name_suffix,
geometry=geom,
kspace=kspaceW1,
numbands=band_number + extra_bands,
resolution=resolution,
mesh_size=mesh_size,
initcode=initcode,
postcode='',
runcode=runcode,
work_in_subfolder=path.join(
containing_folder, jobname + job_name_suffix),
clear_subfolder=runmode.startswith('s') or runmode.startswith('c'))
draw_bands_title = (
'Hex. PhC W1; band {0:02.0f}, radius={1:0.3f}'.format(
band_number, radius) +
bands_title_appendix)
return do_runmode(
sim, runmode, num_processors, draw_bands_title,
plot_crop_y=plot_crop_y,
convert_field_patterns=convert_field_patterns,
field_pattern_plot_k_selection=field_pattern_plot_k_selection,
field_pattern_plot_filetype=defaults.field_dist_filetype,
x_axis_hint=[5, "{1}" if ydirection else "{0}"],
project_bands_list=gap,
color_by_parity='y'
)
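# Minimal usage sketch: the helper below is hypothetical and its k and epsilon
# samples are assumed values; it sketches the k-dependent variant while
# tracking the y-odd waveguide band (the docstring above recommends setting
# extra_bands to more than about 10 for ensure_y_parity='odd').
def _example_effective_epsilon_k_dependent():
    """Fit an assumed epsilon(k) spline and sketch the y-odd band run."""
    from scipy.interpolate import CubicSpline
    kvals = [0.30, 0.35, 0.40, 0.45, 0.50]     # assumed k samples along the waveguide
    epsilons = [11.9, 11.8, 11.6, 11.3, 11.0]  # assumed effective epsilon values
    cs = CubicSpline(kvals, epsilons)
    return TriHoles2D_Waveguide_effective_epsilon_k_dependent(
        epsilon_cubspline_knots=cs.x,
        epsilon_cubspline_coeffs=cs.c,
        band_number=1,
        radius=0.3,
        mode='te',
        ensure_y_parity='odd',
        extra_bands=12,
        runmode='sim',
        num_processors=2)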
|
probstj/pyMPB
|
pympb/phc_simulations.py
|
Python
|
gpl-3.0
| 75,418
|
[
"CRYSTAL",
"TINKER"
] |
5a427bd594ef0a022edf239c9760f0fa67ed2bd3baef7d7b9ffa3c5617a2e545
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import os
import pickle
from collections import OrderedDict
import logging
from utils import *
logger = logging.getLogger(__name__)
class Base(object):
"""Basic wrapper for the symbols
Parameters
----------
data_shapes : dict
The shapes of tensor variables
sym_gen : mx.sym.Symbol
Symbol of the network
params : None or dict, optional
params_grad : None or dict, optional
aux_states:
initializer:
ctx:
name:
"""
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
default_bucket_kwargs=None, learn_init_keys=None,
initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian", magnitude=2),
ctx=mx.gpu(), name='Net'):
self.sym_gen = sym_gen
bucket_kwargs = default_bucket_kwargs.copy() if \
default_bucket_kwargs is not None else dict()
self.curr_bucket_key = None
self.ctx = ctx
self.name = name
self.initializer = initializer
if params is None:
self.params = None
self.params_grad = None
else:
self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
for n, v in self.params.items()])
if aux_states is not None:
self.aux_states = OrderedDict([(k, v.copyto(ctx)) for k, v in aux_states.items()])
else:
self.aux_states = None
self._buckets = dict()
self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
self.acc_grad = None
@property
def exe(self):
"""Get the current executor
Returns
-------
exe : mxnet.executor.Executor
"""
return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
@property
def data_shapes(self):
return self._buckets[self.curr_bucket_key]['data_shapes']
@property
def sym(self):
return self._buckets[self.curr_bucket_key]['sym']
def switch_bucket(self, bucket_kwargs=None, data_shapes=None):
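        # Note: each bucket key maps to one symbol plus a cache of executors
        # keyed by the exact data shapes; an existing executor is reused or
        # reshaped (partial_shaping) instead of rebinding the whole network
        # for every new input shape.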
if bucket_kwargs is not None:
self.curr_bucket_key = get_bucket_key(bucket_kwargs=bucket_kwargs)
# 1. Check if bucket key exists
if self.curr_bucket_key in self._buckets:
if data_shapes is not None:
if tuple(data_shapes.items()) not in self._buckets[self.curr_bucket_key]['exe']:
#TODO Optimize the reshaping functionality!
self._buckets[self.curr_bucket_key]['exe'][tuple(data_shapes.items())] = \
self.exe.reshape(partial_shaping=True, allow_up_sizing=True, **data_shapes)
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
else:
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
return
# 2. If the bucket key does not exist, create new symbol + executor
assert data_shapes is not None, "Must set data_shapes for new bucket!"
if isinstance(self.sym_gen, mx.symbol.Symbol):
sym = self.sym_gen
else:
sym = self.sym_gen(**dict(self.curr_bucket_key))
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
param_names = [n for n in arg_names
if n in self.learn_init_keys or (n not in data_shapes.keys())]
for k, v in data_shapes.items():
            assert isinstance(v, tuple), "Data_shapes must be tuple! Found k=%s, v=%s, " \
"data_shapes=%s" % (k, str(v), str(data_shapes))
arg_shapes, _, aux_shapes = sym.infer_shape(**data_shapes)
arg_name_shape = OrderedDict([(k, s) for k, s in zip(arg_names, arg_shapes)])
if self.params is None:
self.params = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
self.params_grad = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
if len(self.params) > 0:
assert self.initializer is not None, \
                    'We must set the initializer if we do not manually ' \
                    'initialize the free parameters of the network!'
for k, v in self.params.items():
self.initializer(k, v)
else:
            assert set(arg_name_shape.items()) == \
                set(list(data_shapes.items()) +
                    [(k, v.shape) for k, v in self.params.items()])
if self.aux_states is None:
self.aux_states = OrderedDict([(k, nd.empty(s, ctx=self.ctx))
for k, s in zip(aux_names, aux_shapes)])
data_inputs = {k: mx.nd.empty(data_shapes[k], ctx=self.ctx)
for k in set(data_shapes.keys()) - set(self.learn_init_keys)}
if len(self._buckets) > 0:
shared_exe = list(list(self._buckets.values())[0]['exe'].values())[0]
else:
shared_exe = None
self._buckets[self.curr_bucket_key] = {
'exe': {tuple(data_shapes.items()):
sym.bind(ctx=self.ctx,
args=dict(self.params, **data_inputs),
args_grad=dict(self.params_grad.items()),
aux_states=self.aux_states,
shared_exec=shared_exe)
},
'data_shapes': data_shapes,
'sym': sym
}
def save_params(self, dir_path="", epoch=None):
param_saving_path = save_params(dir_path=dir_path, name=self.name, epoch=epoch,
params=self.params,
aux_states=self.aux_states)
misc_saving_path = save_misc(dir_path=dir_path, epoch=epoch, name=self.name,
content={'data_shapes': {k: map(int, v) for k, v in self.data_shapes.items()}})
logging.info('Saving %s, params: \"%s\", misc: \"%s\"',
self.name, param_saving_path, misc_saving_path)
def load_params(self, name="", dir_path="", epoch=None):
params, aux_states, param_loading_path = load_params(dir_path=dir_path, epoch=epoch, name=name)
logging.info('Loading params from \"%s\" to %s' % (param_loading_path, self.name))
for k, v in params.items():
if k in self.params:
logging.debug(' Loading %s %s' %(k, str(v.shape)))
self.params[k][:] = v
else:
logging.warn("Found unused param in the saved model file: %s" % k)
for k, v in aux_states.items():
self.aux_states[k][:] = v
@property
def internal_sym_names(self):
return self.sym.get_internals().list_outputs()
@property
def output_keys(self):
return self.sym.list_outputs()
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
"""
View the internal symbols using the forward function.
:param sym_name:
:param bucket_kwargs:
:param input_dict:
:return:
"""
data_shapes = {k: v.shape for k, v in arg_dict.items()}
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
internal_sym = self.sym.get_internals()[sym_name]
data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
for k, v in self.data_shapes.items()
if k in internal_sym.list_arguments()}
params = {k: v for k, v in self.params.items() if
k in internal_sym.list_arguments()}
aux_states = {k: v for k, v in self.aux_states.items()
if k in internal_sym.list_auxiliary_states()}
exe = internal_sym.bind(ctx=self.ctx,
args=dict(params, **data_inputs),
args_grad=None,
grad_req='null',
aux_states=aux_states,
shared_exec=self.exe)
for k, v in arg_dict.items():
exe.arg_dict[k][:] = v
exe.forward(is_train=False)
assert 1 == len(exe.outputs)
for output in exe.outputs:
output.wait_to_read()
return exe.outputs[0]
def forward(self, is_train=False, bucket_kwargs=None, **arg_dict):
#import time
#start = time.time()
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
#end = time.time()
        #print 'Switch Bucket:', end - start
#start = time.time()
for k, v in arg_dict.items():
            assert self.exe.arg_dict[k].shape == v.shape,\
                "Shape mismatch for key %s: expected %s, received %s" \
                % (k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=is_train)
for output in self.exe.outputs:
output.wait_to_read()
#end = time.time()
#print 'Forward:', end - start
return self.exe.outputs
def backward(self, out_grads=None, **arg_dict):
for k, v in arg_dict.items():
            assert self.exe.arg_dict[k].shape == v.shape, \
                "Shape mismatch for key %s: expected %s, received %s" \
                % (k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.backward(out_grads=out_grads)
def forward_backward(self, bucket_kwargs=None, out_grads=None, **arg_dict):
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
for k, v in arg_dict.items():
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=True)
self.exe.backward(out_grads=out_grads)
for output in self.exe.outputs:
output.wait_to_read()
return self.exe.outputs
def update(self, updater, params_grad=None):
if params_grad is None:
params_grad = self.params_grad
assert type(params_grad) is OrderedDict
for ind, k in enumerate(self.params.keys()):
updater(index=ind, grad=params_grad[k], weight=self.params[k])
def update_acc_grad(self):
if self.acc_grad is None:
self.acc_grad = OrderedDict([(n, nd.zeros(v.shape, ctx=self.ctx))
for n, v in self.params_grad.items()])
for k, v in self.acc_grad.items():
v[:] = v + self.params_grad[k]
def reset_acc_grad(self):
for v in self.acc_grad.values():
v[:] = 0
def copy(self, name=None, ctx=None):
if ctx is None:
ctx = self.ctx
if name is None:
name = self.name + '-copy-' + str(ctx)
return Base(data_shapes=self.data_shapes,
sym_gen=self.sym_gen,
default_bucket_kwargs=dict(self.curr_bucket_key),
params=self.params,
aux_states=self.aux_states, ctx=ctx, name=name)
def copy_params_to(self, dst):
for k, v in self.params.items():
dst.params[k][:] = v
# TODO `wait_to_read()` here seems unnecessary, remove it in the future!
dst.params[k].wait_to_read()
@property
def total_param_num(self):
return sum(v.size for v in self.params.values())
def print_stat(self):
logging.info("Name: %s" % self.name)
assert self.params is not None, "Fatal Error!"
logging.info("Params: ")
for k, v in self.params.items():
logging.info(" %s: %s" % (k, v.shape))
if self.aux_states is None or 0 == len(self.aux_states):
logging.info("Aux States: None")
else:
logging.info("Aux States: " + ' '.join(
["%s:%s" % (str(k), str(v.shape)) for k, v in self.aux_states.items()]))
logging.info("Total Parameter Num: " + str(self.total_param_num))
|
Mega-DatA-Lab/mxnet
|
example/reinforcement-learning/dqn/base.py
|
Python
|
apache-2.0
| 13,774
|
[
"Gaussian"
] |
482b0902ac17850ab9b9fabc86056bbea65e650539fb2f878b1aa995945a55dc
|
from math import ceil, log2
def one_bits(n):
"""
Returns the number of 1s in the binary representation of a non-negative integer.
    Brian Kernighan's algorithm. Note that n - 1 flips all bits from the lowest 1-bit (inclusive) downward, so n &= n - 1
    clears that bit. This has the same effect as n -= n & -n, which is used in the Fenwick tree.
Time complexity is O(\log n). Space complexity is O(1).
:param n: int, non-negative
:return: int
"""
assert n >= 0
c = 0
while n > 0:
n &= n - 1 # zeros the last 1-bit
c += 1
return c
def one_bits2(n):
assert n >= 0
for i, x in [(1, 0b01010101010101010101010101010101),
(2, 0b00110011001100110011001100110011),
(4, 0b00001111000011110000111100001111),
(8, 0b00000000111111110000000011111111),
(16, 0b00000000000000001111111111111111)]:
n = (n & x) + ((n >> i) & x)
return n
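def _one_bits2_examples():
    """Hedged examples, not part of the original file: the 32-bit masks above
    mean one_bits2 only counts the low 32 bits, so its input is assumed to be
    below 2**32; one_bits() has no such limit."""
    assert one_bits2(0b11011010) == 5 == one_bits(0b11011010)
    assert one_bits2(0) == 0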
def is_bleak(x):
"""
    A positive integer x is bleak if there does not exist y <= x, s.t. x == y + one_bits(y)
:param x: int, positive
:return: bool
"""
assert x > 0
for y in range(x - ceil(log2(x)), x): # the greatest # of 1-bits in any y <= x is ceil(log_2(x))
if y + one_bits(y) == x:
return False
return True
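def _is_bleak_bruteforce(x):
    """Hedged cross-check, not part of the original file: O(x) brute force over
    the definition, useful for validating the windowed search in is_bleak above."""
    assert x > 0
    return all(y + one_bits(y) != x for y in range(x + 1))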
if __name__ == '__main__':
for n in range(1000):
assert one_bits(n) == one_bits2(n) == bin(n)[2:].count('1')
for i in range(1, 100):
print(i, is_bleak(i))
|
liboyin/algo-prac
|
mathematics/bleak_number.py
|
Python
|
gpl-3.0
| 1,490
|
[
"Brian"
] |
70c8a050a83726b57a6a6bdee77a73e1989dbe6e8bc94b13e92f80fb77a935dc
|
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names.
MATLAB compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
Interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
Find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
    A 4th order Runge-Kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (spectrum over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in MATLAB, but are useful anyway:
:func:`cohere_pairs`
Coherence over all pairs. This is not a MATLAB function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:func:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:func:`contiguous_regions`
Return the indices of the regions spanned by some logical mask
:func:`cross_from_below`
Return the indices where a 1D array crosses a threshold from below
:func:`cross_from_above`
Return the indices where a 1D array crosses a threshold from above
:func:`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
:func:`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
:func:`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
:func:`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
:func:`detrend_mean`
Remove the mean from a line.
:func:`demean`
    Remove the mean from a line. This function is the same as
:func:`detrend_mean` except for the default *axis*.
:func:`detrend_linear`
Remove the best fit line from a line.
:func:`detrend_none`
Return the original line.
:func:`stride_windows`
Get all windows in an array in a memory-efficient manner
:func:`stride_repeat`
Repeat an array in a memory-efficient manner
:func:`apply_window`
Apply a window along a given axis
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:func:`rec2txt`
Pretty print a record array
:func:`rec2csv`
Store record array in CSV file
:func:`csv2rec`
Import record array from CSV file with type inspection
:func:`rec_append_fields`
Adds field(s)/array(s) to record array
:func:`rec_drop_fields`
Drop fields from record array
:func:`rec_join`
Join two record arrays on sequence of fields
:func:`recs_join`
A simple join of multiple recarrays using a single column as a key
:func:`rec_groupby`
Summarize data by groups (similar to SQL GROUP BY)
:func:`rec_summarize`
Helper code to filter rec array fields into new fields
For the rec viewer functions (e.g., rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:func:`load`
Load ASCII file - use numpy.loadtxt
:func:`save`
Save ASCII file - use numpy.savetxt
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, xrange, zip
if six.PY3:
long = int
import copy
import csv
import operator
import os
import warnings
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.cbook as cbook
from matplotlib import docstring
from matplotlib.path import Path
def logspace(xmin, xmax, N):
'''
Return N values logarithmically spaced between xmin and xmax.
Call signature::
logspace(xmin, xmax, N)
'''
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
'''
Return sqrt(x dot x).
Call signature::
_norm(x)
'''
return np.sqrt(np.dot(x, x))
def window_hanning(x):
'''
Return x times the hanning window of len(x).
Call signature::
window_hanning(x)
.. seealso::
:func:`window_none`
:func:`window_none` is another window algorithm.
'''
return np.hanning(len(x))*x
def window_none(x):
'''
No window function; simply return x.
Call signature::
window_none(x)
.. seealso::
:func:`window_hanning`
:func:`window_hanning` is another window algorithm.
'''
return x
def apply_window(x, window, axis=0, return_window=None):
'''
Apply the given window to the given 1D or 2D array along the given axis.
Call signature::
apply_window(x, window, axis=0, return_window=False)
*x*: 1D or 2D array or sequence
Array or sequence containing the data.
    *window*: function or array.
Either a function to generate a window or an array with length
*x*.shape[*axis*]
*axis*: integer
        The axis over which to apply the window.
Must be 0 or 1. The default is 0
*return_window*: bool
If true, also return the 1D values of the window that was applied
'''
x = np.asarray(x)
if x.ndim < 1 or x.ndim > 2:
raise ValueError('only 1D or 2D arrays can be used')
if axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
xshape = list(x.shape)
xshapetarg = xshape.pop(axis)
if cbook.iterable(window):
if len(window) != xshapetarg:
raise ValueError('The len(window) must be the same as the shape '
'of x for the chosen axis')
windowVals = window
else:
windowVals = window(np.ones(xshapetarg, dtype=x.dtype))
if x.ndim == 1:
if return_window:
return windowVals * x, windowVals
else:
return windowVals * x
xshapeother = xshape.pop()
otheraxis = (axis+1) % 2
windowValsRep = stride_repeat(windowVals, xshapeother, axis=otheraxis)
if return_window:
return windowValsRep * x, windowVals
else:
return windowValsRep * x
def detrend(x, key=None, axis=None):
'''
Return x with its trend removed.
Call signature::
detrend(x, key='mean')
*x*: array or sequence
Array or sequence containing the data.
*key*: [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or function
Specifies the detrend algorithm to use. 'default' is 'mean',
which is the same as :func:`detrend_mean`. 'constant' is the same.
'linear' is the same as :func:`detrend_linear`. 'none' is the same
as :func:`detrend_none`. The default is 'mean'. See the
corresponding functions for more details regarding the algorithms.
Can also be a function that carries out the detrend operation.
*axis*: integer
The axis along which to do the detrending.
.. seealso::
:func:`detrend_mean`
:func:`detrend_mean` implements the 'mean' algorithm.
:func:`detrend_linear`
:func:`detrend_linear` implements the 'linear' algorithm.
:func:`detrend_none`
:func:`detrend_none` implements the 'none' algorithm.
'''
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)
elif cbook.is_string_like(key):
raise ValueError("Unknown value for key %s, must be one of: "
"'default', 'constant', 'mean', "
"'linear', or a function" % key)
if not callable(key):
raise ValueError("Unknown value for key %s, must be one of: "
"'default', 'constant', 'mean', "
"'linear', or a function" % key)
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
return key(x)
# try to use the 'axis' argument if the function supports it,
# otherwise use apply_along_axis to do it
try:
return key(x, axis=axis)
except TypeError:
return np.apply_along_axis(key, axis=axis, arr=x)
def demean(x, axis=0):
'''
Return x minus its mean along the specified axis.
Call signature::
demean(x, axis=0)
*x*: array or sequence
Array or sequence containing the data
Can have any dimensionality
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`delinear`
:func:`denone`
:func:`delinear` and :func:`denone` are other detrend algorithms.
:func:`detrend_mean`
            This function is the same as :func:`detrend_mean` except
for the default *axis*.
'''
return detrend_mean(x, axis=axis)
def detrend_mean(x, axis=None):
'''
Return x minus the mean(x).
Call signature::
detrend_mean(x, axis=None)
*x*: array or sequence
Array or sequence containing the data
Can have any dimensionality
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`demean`
            This function is the same as :func:`demean` except
for the default *axis*.
:func:`detrend_linear`
:func:`detrend_none`
:func:`detrend_linear` and :func:`detrend_none` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
# short-circuit 0-D array.
if not x.ndim:
return np.array(0., dtype=x.dtype)
# short-circuit simple operations
if axis == 0 or axis is None or x.ndim <= 1:
return x - x.mean(axis)
ind = [slice(None)] * x.ndim
ind[axis] = np.newaxis
return x - x.mean(axis)[ind]
def detrend_none(x, axis=None):
'''
Return x: no detrending.
Call signature::
detrend_none(x, axis=None)
*x*: any object
An object containing the data
*axis*: integer
This parameter is ignored.
It is included for compatibility with detrend_mean
.. seealso::
:func:`denone`
            This function is the same as :func:`denone` except
for the default *axis*, which has no effect.
:func:`detrend_mean`
:func:`detrend_linear`
:func:`detrend_mean` and :func:`detrend_linear` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
return x
def detrend_linear(y):
'''
    Return y minus best fit line; 'linear' detrending.
Call signature::
detrend_linear(y)
*y*: 0-D or 1-D array or sequence
Array or sequence containing the data
.. seealso::
:func:`delinear`
            This function is the same as :func:`delinear` except
for the default *axis*.
:func:`detrend_mean`
:func:`detrend_none`
:func:`detrend_mean` and :func:`detrend_none` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
# This is faster than an algorithm based on linalg.lstsq.
y = np.asarray(y)
if y.ndim > 1:
raise ValueError('y cannot have ndim > 1')
# short-circuit 0-D array.
if not y.ndim:
return np.array(0., dtype=y.dtype)
x = np.arange(y.size, dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0, 1]/C[0, 0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
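def _example_detrend_linear():
    """Hedged sanity check, not part of the original file: removing the best-fit
    line from an exactly linear signal leaves numerical zeros."""
    y = 3.0 * np.arange(10) + 2.0
    return detrend_linear(y)          # expected to be ~0 everywhere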
def stride_windows(x, n, noverlap=None, axis=0):
'''
Get all windows of x with length n as a single array,
using strides to avoid data duplication.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory,
so modifying one value may change others.
Call signature::
stride_windows(x, n, noverlap=0)
*x*: 1D array or sequence
Array or sequence containing the data.
*n*: integer
The number of data points in each window.
*noverlap*: integer
The overlap between adjacent windows.
Default is 0 (no overlap)
*axis*: integer
The axis along which the windows will run.
Refs:
        `stackoverflow: Rolling window for 1D arrays in Numpy?
        <http://stackoverflow.com/a/6811241>`_
        `stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>`_
'''
if noverlap is None:
noverlap = 0
if noverlap >= n:
raise ValueError('noverlap must be less than n')
if n < 1:
raise ValueError('n cannot be less than 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1 and noverlap == 0:
if axis == 0:
return x[np.newaxis]
else:
return x[np.newaxis].transpose()
if n > x.size:
raise ValueError('n cannot be greater than the length of x')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. noverlap or n. See #3845.
noverlap = int(noverlap)
n = int(n)
step = n - noverlap
if axis == 0:
shape = (n, (x.shape[-1]-noverlap)//step)
strides = (x.strides[0], step*x.strides[0])
else:
shape = ((x.shape[-1]-noverlap)//step, n)
strides = (step*x.strides[0], x.strides[0])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
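def _example_stride_windows():
    """Hedged shape check, not part of the original file: with n=4 and noverlap=2
    on a length-10 signal the windows advance by 2 samples, giving a (4, 4) view
    whose columns are the windows and which shares memory with the input."""
    x = np.arange(10)
    w = stride_windows(x, 4, noverlap=2)
    assert w.shape == (4, 4)
    return w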
def stride_repeat(x, n, axis=0):
'''
Repeat the values in an array in a memory-efficient manner. Array x is
stacked vertically n times.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory, so
modifying one value may change others.
Call signature::
stride_repeat(x, n, axis=0)
*x*: 1D array or sequence
Array or sequence containing the data.
*n*: integer
        The number of times to repeat the array.
*axis*: integer
The axis along which the data will run.
Refs:
        `stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>`_
'''
if axis not in [0, 1]:
raise ValueError('axis must be 0 or 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1:
if axis == 0:
return np.atleast_2d(x)
else:
return np.atleast_2d(x).T
if n < 1:
raise ValueError('n cannot be less than 1')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. n. See #3845.
n = int(n)
if axis == 0:
shape = (n, x.size)
strides = (0, x.strides[0])
else:
shape = (x.size, n)
strides = (x.strides[0], 0)
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
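def _example_stride_repeat():
    """Hedged shape check, not part of the original file: repeating a length-5
    vector 3 times along axis 0 yields a (3, 5) strided view in which every row
    aliases the same memory, so it must be treated as read-only."""
    x = np.arange(5)
    r = stride_repeat(x, 3, axis=0)
    assert r.shape == (3, 5)
    return r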
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None):
'''
This is a helper function that implements the commonality between the
psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums.
It is *NOT* meant to be used outside of mlab and may change at any time.
'''
if y is None:
# if y is None use x for y
same_data = True
else:
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
if Fs is None:
Fs = 2
if noverlap is None:
noverlap = 0
if detrend_func is None:
detrend_func = detrend_none
if window is None:
window = window_hanning
# if NFFT is set to None use the whole signal
if NFFT is None:
NFFT = 256
if mode is None or mode == 'default':
mode = 'psd'
elif mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
if sides is None or sides == 'default':
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
elif sides not in ['onesided', 'twosided']:
raise ValueError("Unknown value for sides %s, must be one of: "
"'default', 'onesided', or 'twosided'" % sides)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if mode != 'psd':
scale_by_freq = False
elif scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if sides == 'twosided':
numFreqs = pad_to
if pad_to % 2:
freqcenter = (pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result, windowVals = apply_window(result, window, axis=0,
return_window=True)
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
# if same_data is False, mode must be 'psd'
resultY = stride_windows(y, NFFT, noverlap)
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = apply_window(resultY, window, axis=0)
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conjugate(result) * resultY
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
pass
if mode == 'psd':
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(windowVals)**2).sum()
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
result[1:-1] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[freqcenter:], freqs[:freqcenter]))
result = np.concatenate((result[freqcenter:, :],
result[:freqcenter, :]), 0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(x, mode, Fs=None, window=None, pad_to=None,
sides=None):
'''
This is a helper function that implements the commonality between the
complex, magnitude, angle, and phase spectrums.
It is *NOT* meant to be used outside of mlab and may change at any time.
'''
if mode is None or mode == 'psd' or mode == 'default':
raise ValueError('_single_spectrum_helper does not work with %s mode'
% mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if len(spec.shape) == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
#Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(Spectral=cbook.dedent("""
Keyword arguments:
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the spectrum to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided
spectrum, while 'twosided' forces two-sided.
"""))
docstring.interpd.update(Single_Spectrum=cbook.dedent("""
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. While not increasing the actual resolution of
the spectrum (the minimum distance between resolvable peaks),
this can give more points in the plot, allowing for more
detail. This corresponds to the *n* parameter in the call to fft().
The default is None, which sets *pad_to* equal to the length of the
input signal (i.e. no padding).
"""))
docstring.interpd.update(PSD=cbook.dedent("""
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the spectrum (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*NFFT*: integer
The number of data points used in each block for the FFT.
        A power of 2 is most efficient. The default value is 256.
This should *NOT* be used to get zero padding, or the scaling of the
result will be incorrect. Use *pad_to* for this instead.
*detrend*: [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or
callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
MATLAB, where the *detrend* parameter is a vector, in
        matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well. You can also use a string to choose
one of the functions. 'default', 'constant', and 'mean' call
:func:`~matplotlib.pylab.detrend_mean`. 'linear' calls
:func:`~matplotlib.pylab.detrend_linear`. 'none' calls
:func:`~matplotlib.pylab.detrend_none`.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MATLAB compatibility.
"""))
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the power spectral density.
Call signature::
psd(x, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None)
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns the tuple (*Pxx*, *freqs*).
*Pxx*: 1-D array
The values for the power spectrum `P_{xx}` (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxx*
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
.. seealso::
:func:`specgram`
:func:`specgram` differs in the default overlap; in not returning
the mean of the segment periodograms; and in returning the
times of the segments.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitude spectrum.
:func:`csd`
:func:`csd` returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
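def _example_psd():
    """Hedged usage sketch, not part of the original file: PSD of a 100 Hz sine
    sampled at 1 kHz; the returned spectrum is expected to peak near 100 Hz.
    All numbers are illustrative only."""
    fs = 1000.
    t = np.arange(0, 1, 1/fs)
    x = np.sin(2*np.pi*100*t)
    Pxx, freqs = psd(x, NFFT=256, Fs=fs, noverlap=128)
    return freqs[np.argmax(Pxx)]      # roughly 100 Hz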
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
Call signature::
csd(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*: 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns the tuple (*Pxy*, *freqs*):
*Pxy*: 1-D array
The values for the cross spectrum `P_{xy}` before scaling
(real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxy*
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
.. seealso::
:func:`psd`
:func:`psd` is the equivalent to setting y=x.
"""
if NFFT is None:
NFFT = 256
Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
mode='psd')
if len(Pxy.shape) == 2:
if Pxy.shape[1] > 1:
Pxy = Pxy.mean(axis=1)
else:
Pxy = Pxy[:, 0]
return Pxy, freqs
@docstring.dedent_interpd
def complex_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the complex-valued frequency spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to the
signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the complex spectrum (complex valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the absolute value of this
function.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the angle of this
function.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the phase (unwrapped angle) of this
function.
:func:`specgram`
:func:`specgram` can return the complex spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='complex')
@docstring.dedent_interpd
def magnitude_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the magnitude (absolute value) of the frequency spectrum of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the magnitude spectrum (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`psd`
:func:`psd` returns the power spectral density.
:func:`complex_spectrum`
This function returns the absolute value of
:func:`complex_spectrum`.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the angles of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the phase (unwrapped angle) of the
corresponding frequencies.
:func:`specgram`
:func:`specgram` can return the magnitude spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='magnitude')
@docstring.dedent_interpd
def angle_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the angle of the frequency spectrum (wrapped phase spectrum) of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the angle spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`complex_spectrum`
This function returns the angle value of
:func:`complex_spectrum`.
:func:`magnitude_spectrum`
        :func:`magnitude_spectrum` returns the magnitudes of the
corresponding frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the unwrapped version of this
function.
:func:`specgram`
:func:`specgram` can return the angle spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='angle')
@docstring.dedent_interpd
def phase_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the phase of the frequency spectrum (unwrapped angle spectrum) of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the phase spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`complex_spectrum`
        This function returns the unwrapped angle of
:func:`complex_spectrum`.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the wrapped version of this
function.
:func:`specgram`
:func:`specgram` can return the phase spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='phase')
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
mode=None):
"""
Compute a spectrogram.
Call signature::
        specgram(x, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, mode='default')
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*mode*: [ 'default' | 'psd' | 'complex' | 'magnitude'
'angle' | 'phase' ]
        What sort of spectrum to use. Default is 'psd', which takes the
power spectral density. 'complex' returns the complex-valued
frequency spectrum. 'magnitude' returns the magnitude spectrum.
'angle' returns the phase spectrum without unwrapping. 'phase'
returns the phase spectrum with unwrapping.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 128.
Returns the tuple (*spectrum*, *freqs*, *t*):
*spectrum*: 2-D array
columns are the periodograms of successive segments
*freqs*: 1-D array
The frequencies corresponding to the rows in *spectrum*
*t*: 1-D array
        The times corresponding to midpoints of segments (i.e., the columns
in *spectrum*).
.. note::
*detrend* and *scale_by_freq* only apply when *mode* is set to
'psd'
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
:func:`complex_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'complex'.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'magnitude'.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'angle'.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'phase'.
"""
if noverlap is None:
noverlap = 128
spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if mode != 'complex':
spec = spec.real # Needed since helper implements generically
return spec, freqs, t
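def _example_specgram():
    """Hedged usage sketch, not part of the original file: spectrogram of a
    signal whose frequency rises over time; rows of *spec* are frequencies and
    columns are time segments."""
    fs = 1000.
    t = np.arange(0, 2, 1/fs)
    x = np.sin(2*np.pi*(50 + 100*t)*t)
    spec, freqs, times = specgram(x, NFFT=256, Fs=fs, noverlap=128)
    return spec.shape, freqs.shape, times.shape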
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect,
since the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x) < 2 * NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
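def _example_cohere():
    """Hedged usage sketch, not part of the original file: coherence between a
    100 Hz sine and a lightly noise-corrupted copy of itself; Cxy is expected
    to be close to 1 near 100 Hz."""
    fs = 1000.
    t = np.arange(0, 2, 1/fs)
    x = np.sin(2*np.pi*100*t)
    y = x + 0.1*np.random.randn(len(t))
    Cxy, f = cohere(x, y, NFFT=256, Fs=fs)
    return Cxy, f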
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
"""
Call signature::
Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
Compute the coherence and phase for all pairs *ij*, in *X*.
*X* is a *numSamples* * *numCols* array
*ij* is a list of tuples. Each tuple is a pair of indexes into
the columns of X for which you want to compute coherence. For
example, if *X* has 64 columns, and you want to compute all
nonredundant pairs, define *ij* as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i,j) )
*preferSpeedOverMemory* is an optional bool. Defaults to true. If
False, limits the caching by only making one, rather than two,
complex cache arrays. This is useful if memory becomes critical.
Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
    will still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is True. In my tests with
a 43000,64 array over all nonredundant pairs,
*preferSpeedOverMemory* = True delivered a 33% performance boost
on a 1.7GHZ Athlon with 512MB RAM compared with
*preferSpeedOverMemory* = False. But both solutions were more
than 10x faster than naively crunching all possible pairs through
:func:`cohere`.
Returns::
(Cxy, Phase, freqs)
where:
- *Cxy*: dictionary of (*i*, *j*) tuples -> coherence vector for
      that pair. i.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
Number of dictionary keys is ``len(ij)``.
- *Phase*: dictionary of phases of the cross spectral density at
each frequency for each pair. Keys are (*i*, *j*).
- *freqs*: vector of frequencies, equal in length to either the
coherence or phase vectors for any (*i*, *j*) key.
e.g., to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If :math:`N` is the
number of pairs, this function is :math:`O(N)` for most of the
heavy lifting, whereas calling cohere for each pair is
:math:`O(N^2)`. However, because of the caching, it is also more
memory intensive, making 2 additional complex arrays with
approximately the same number of elements as *X*.
See :file:`test/cohere_pairs_test.py` in the src tree for an
example script that shows that this :func:`cohere_pairs` and
:func:`cohere` give the same results for a given pair.
.. seealso::
:func:`psd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
allColumns = set()
for i,j in ij:
allColumns.add(i); allColumns.add(j)
Ncols = len(allColumns)
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
    # cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones(NFFT, X.dtype))
ind = list(xrange(0, numRows-NFFT+1, NFFT-noverlap))
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = np.linalg.norm(windowVals)**2
    for i, iCol in enumerate(allColumns):
        progressCallback(i/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = np.conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices>1: Pxy = np.mean(Pxy, axis=0)
#Pxy = np.divide(Pxy, normVal)
Pxy /= normVal
#Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Cxy[i,j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
Phase[i,j] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y* in units of nat.
.. math::
-\sum p_i \ln(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n, bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1] - bins[0]
S = -1.0 * np.sum(p * np.log(p)) + np.log(delta)
return S
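def _example_entropy():
    """Hedged sanity check, not part of the original file, following the
    docstring above: the histogram entropy of Gaussian samples is expected to
    roughly match the analytic value 0.5*(1 + log(2*pi*sigma**2))."""
    sigma = 2.0
    y = sigma * np.random.randn(200000)
    S = entropy(y, bins=200)
    S_analytic = 0.5 * (1.0 + np.log(2*np.pi*sigma**2))
    return S, S_analytic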
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
.. warning::
This function is deprecated -- please see class PCA instead
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, i.e., *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the MATLAB
R13 Neural Network Toolbox but is not found in later versions;
    its successor seems to be called "processpca".
"""
warnings.warn('This function is deprecated -- see class PCA instead')
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
class PCA:
def __init__(self, a, standardize=True):
"""
compute the SVD of a and store data for PCA. Use project to
project the data onto a reduced set of dimensions
Inputs:
*a*: a numobservations x numdims array
*standardize*: True if input data are to be standardized. If False, only centering will be
carried out.
Attrs:
*a* a centered unit sigma version of input a
*numrows*, *numcols*: the dimensions of a
*mu* : a numdims array of means of a. This is the vector that points to the
origin of PCA space.
*sigma* : a numdims array of standard deviation of a
*fracs* : the proportion of variance of each of the principal components
*s* : the actual eigenvalues of the decomposition
*Wt* : the weight vector for projecting a numdims point or array into PCA space
*Y* : a projected into PCA space
The factor loadings are in the Wt factor, i.e., the factor
loadings for the 1st principal component are given by Wt[0].
This row is also the 1st eigenvector.
"""
n, m = a.shape
if n<m:
raise RuntimeError('we assume data in a is organized with numrows>numcols')
self.numrows, self.numcols = n, m
self.mu = a.mean(axis=0)
self.sigma = a.std(axis=0)
self.standardize = standardize
a = self.center(a)
self.a = a
U, s, Vh = np.linalg.svd(a, full_matrices=False)
# Note: .H indicates the conjugate transposed / Hermitian.
# The SVD is commonly written as a = U s V.H.
# If U is a unitary matrix, it means that it satisfies U.H = inv(U).
# The rows of Vh are the eigenvectors of a.H a.
# The columns of U are the eigenvectors of a a.H.
# For row i in Vh and column i in U, the corresponding eigenvalue is s[i]**2.
self.Wt = Vh
# save the transposed coordinates
Y = np.dot(Vh, a.T).T
self.Y = Y
# save the eigenvalues
self.s = s**2
# and now the contribution of the individual components
vars = self.s/float(len(s))
self.fracs = vars/vars.sum()
def project(self, x, minfrac=0.):
        'project x onto the principal axes, dropping any axes where fraction of variance<minfrac'
x = np.asarray(x)
ndims = len(x.shape)
if (x.shape[-1]!=self.numcols):
raise ValueError('Expected an array with dims[-1]==%d'%self.numcols)
Y = np.dot(self.Wt, self.center(x).T).T
mask = self.fracs>=minfrac
if ndims==2:
Yreduced = Y[:,mask]
else:
Yreduced = Y[mask]
return Yreduced
def center(self, x):
'center and optionally standardize the data using the mean and sigma from training set a'
if self.standardize:
return (x - self.mu)/self.sigma
else:
return (x - self.mu)
@staticmethod
def _get_colinear():
c0 = np.array([
0.19294738, 0.6202667 , 0.45962655, 0.07608613, 0.135818 ,
0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
0.837437 , 0.10824303, 0.1723387 , 0.43926494, 0.83705486])
c1 = np.array([
-1.17705601, -0.513883 , -0.26614584, 0.88067144, 1.00474954,
-1.1616545 , 0.0266109 , 0.38227157, 1.80489433, 0.21472396,
-1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
-0.4685737 , 1.23980423, -0.14638744, -0.35907697, 0.22442616])
c2 = c0 + 2*c1
c3 = -3*c0 + 4*c1
a = np.array([c3, c0, c1, c2]).T
return a
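def _example_pca():
    """Hedged usage sketch, not part of the original file: project the class's
    built-in colinear test data onto its principal axes; with only two
    independent columns, roughly two variance fractions dominate."""
    a = PCA._get_colinear()               # (20, 4) array with rank-2 structure
    pca = PCA(a)
    reduced = pca.project(a, minfrac=0.01)
    return pca.fracs, reduced.shape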
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
# This implementation derived from scipy.stats.scoreatpercentile
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a)*fraction
scalar = True
if cbook.iterable(p):
scalar = False
per = np.array(p)
values = np.array(x).ravel() # copy
values.sort()
idxs = per /100. * (values.shape[0] - 1)
ai = idxs.astype(np.int)
bi = ai + 1
frac = idxs % 1
# handle cases where attempting to interpolate past last index
cond = bi >= len(values)
if scalar:
if cond:
ai -= 1
bi -= 1
frac += 1
else:
ai[cond] -= 1
bi[cond] -= 1
frac[cond] += 1
return _interpolate(values[ai],values[bi],frac)
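def _example_prctile():
    """Hedged usage sketch, not part of the original file: on the integers
    0..100 the default percentile set returns (min, Q1, median, Q3, max)."""
    x = np.arange(101)
    return prctile(x)                     # approximately [0, 25, 50, 75, 100]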
def prctile_rank(x, p):
"""
    Return the rank for each element in *x*; ranks run from 0 to
    len(*p*). e.g., if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
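def _example_segments_intersect():
    """Hedged usage sketch, not part of the original file: two crossing unit
    diagonals intersect, while two parallel horizontal segments do not."""
    crossing = segments_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0)))
    parallel = segments_intersect(((0, 0), (1, 0)), ((0, 1), (1, 1)))
    return crossing, parallel             # (True, False)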
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (e.g.,
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
@cbook.deprecated('1.3', name='FIFOBuffer', obj_type='class')
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xy = np.asarray([(x,y),])
self.dataLim.update_from_data_xy(xy, None)
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in six.iteritems(self.callbackd):
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
        Get the last *x*, *y* pair, or *None*, *None* if no data has been added.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
        Update the *dataLim* from the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_from_data(x, y, True)
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
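# Illustrative usage sketch (added; not part of the original module): a 3-point
# moving average of [1, 2, 3, 4, 5] yields [2., 3., 4.]; note that the 'valid'
# convolution shortens the output by n - 1 samples.
def _example_movavg():
    return movavg(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), 3)  # -> array([2., 3., 4.])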
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return np.exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(list(map(fn,*args)))
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    if xfin is None:
        xfin = xini + 0.0
        xini = 0.0
    if delta is None:
        delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr(number, base=2, padding=0):
    """
    Return the representation of a *number* in any given *base*.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number)/math.log(base))
    # use int rather than the Python 2 only `long`; this file targets Python 3
    max_power = int(base) ** max_exponent
    lead_digit = int(number/max_power)
    return chars[lead_digit] + \
           base_repr(number - max_power * lead_digit, base,
                     max(padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = list(map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1)))
digits = list(map (operator.mod, shifts, max_length * [2]))
    if not digits.count(1):
        return '0'
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
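# Illustrative usage sketch (added; not part of the original module): base_repr
# renders a number in an arbitrary base up to 36, while binary_repr is a faster
# base-2 variant.
def _example_base_repr():
    return base_repr(5, base=2), base_repr(255, base=16), binary_repr(10)
    # -> ('101', 'FF', '1010')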
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
    This is a _slow_ function, but it is guaranteed to return the correct
    integer value if the input is an exact integer power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the MATLAB function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has at most one non-singleton axis (i.e., it can have
    multiple axes, but all except one must have length 1).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
### end fperez numutils code
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError("number of arrays do not match number of names")
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = list(map(np.asarray, arrs))
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError("dtypes must be None, a single dtype or a list")
newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return newrec
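# Illustrative usage sketch (added; not part of the original module): append a
# derived column to a small record array. The field names used here are
# arbitrary demonstration values.
def _example_rec_append_fields():
    r = np.rec.fromrecords([(1, 2.0), (2, 4.0)], names='id,price')
    return rec_append_fields(r, 'price_sq', r['price'] ** 2)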
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return newrec
def rec_keep_fields(rec, names):
"""
Return a new numpy record array with only fields listed in names
"""
if cbook.is_string_like(names):
names = names.split(',')
arrays = []
for name in names:
arrays.append(rec[name])
return np.rec.fromarrays(arrays, names=names)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. e.g., ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = list(six.iterkeys(rowd))
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = list(zip(*stats))
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
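# Illustrative usage sketch (added; not part of the original module): group a
# tiny record array by 'product' and compute the number of rows and the mean
# price per group. Field names are arbitrary demonstration values.
def _example_rec_groupby():
    r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 2.0)],
                           names='product,price')
    stats = (('price', len, 'numsales'), ('price', np.mean, 'avgprice'))
    return rec_groupby(r, ('product',), stats)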
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r2.dtype[name]
if dt1 != dt2:
msg = "The '{}' fields in arrays 'r1' and 'r2' must have the same"
msg += " dtype."
raise ValueError(msg.format(name))
if dt1.num>dt2.num:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
if defaults is not None:
for thiskey in defaults:
if thiskey not in newdtype.names:
warnings.warn('rec_join defaults key="%s" not in new dtype names "%s"'%(
thiskey, newdtype.names))
for name in newdtype.names:
dt = newdtype[name]
if dt.kind in ('f', 'i'):
newrec[name] = 0
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
        newrec_fields = list(six.iterkeys(newrec.dtype.fields))
for k, v in six.iteritems(defaults):
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return newrec
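# Illustrative usage sketch (added; not part of the original module): an inner
# join of two record arrays on a shared 'id' column. Only rows whose ids are
# present in both arrays appear in the result.
def _example_rec_join():
    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,price')
    r2 = np.rec.fromrecords([(2, 'b'), (3, 'c')], names='id,label')
    return rec_join('id', r1, r2, jointype='inner')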
def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
"""
    Join a sequence of record arrays on a single column key.
    This function only joins a single column of the multiple record arrays.
*key*
is the column name that acts as a key
*name*
is the name of the column that we want to join
*recs*
is a list of record arrays to join
*jointype*
is a string 'inner' or 'outer'
*missing*
is what any missing field is replaced by
*postfixes*
if not None, a len recs sequence of postfixes
returns a record array with columns [rowkey, name0, name1, ... namen-1].
or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
[rowkey, namePF0, namePF1, ... namePFN-1].
Example::
r = recs_join("date", "close", recs=[r0, r1], missing=0.)
"""
results = []
aligned_iters = cbook.align_iterators(operator.attrgetter(key), *[iter(r) for r in recs])
def extract(r):
if r is None: return missing
else: return r[name]
if jointype == "outer":
for rowkey, row in aligned_iters:
results.append([rowkey] + list(map(extract, row)))
elif jointype == "inner":
for rowkey, row in aligned_iters:
if None not in row: # throw out any Nones
results.append([rowkey] + list(map(extract, row)))
if postfixes is None:
postfixes = ['%d'%i for i in range(len(recs))]
names = ",".join([key] + ["%s%s" % (name, postfix) for postfix in postfixes])
return np.rec.fromrecords(results, names=names)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=False, dayfirst=False, yearfirst=False):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file, or *None* to switch off the removal of comments
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g., '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
- *dayfirst*: default is False so that MM-DD-YY has precedence over
DD-MM-YY. See http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
for further information.
- *yearfirst*: default is False so that MM-DD-YY has precedence over
YY-MM-DD. See http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
for further information.
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
fh = cbook.to_filehandle(fname)
delimiter = str(delimiter)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def __next__(self):
return self.fix(next(self.fh))
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and comments is not None and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
while 1:
# skip past any comments and consume one line of column header
row = next(reader)
if len(row) and comments is not None and row[0].startswith(comments):
continue
break
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row):
continue
if comments is not None and row[0].startswith(comments):
continue
# Ensure that the row returned always has the same nr of elements
row.extend([''] * (len(converters) - len(row)))
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
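# Illustrative usage sketch (added; not part of the original module): csv2rec
# accepts an open file handle as well as a filename, so an in-memory CSV can be
# parsed directly. Column types (bool, int, float, date, string) are inferred
# from the data; the sample data below are arbitrary.
def _example_csv2rec():
    from io import StringIO
    fh = StringIO("date,close\n2015-01-02,10.5\n2015-01-05,11.25\n")
    return csv2rec(fh)  # recarray with a datetime.date and a float column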
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
def __hash__(self):
"""
override the hash function of any of the formatters, so that we don't create duplicate excel format styles
"""
return hash(self.__class__)
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def __hash__(self):
return hash((self.__class__, self.precision, self.scale))
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def __hash__(self):
return hash((self.__class__, self.fmt))
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3, fields=None):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
*fields* : if not None, a list of field names to print. fields
can be a list of strings like ['field1', 'field2'] or a single
comma separated string like 'field1,field2'
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if fields is not None:
r = rec_keep_fields(r, fields)
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(list(map(len, list(map(str,column))))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(list(map(len, list(map(lambda x:fmt%x,column))))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(list(map(len, list(map(str,column))))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None, withheader=True):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
*withheader*: if withheader is False, do not write the attribute
names in the first row
for formatd type FormatFloat, we override the precision to store
full precision floats in the CSV file
.. seealso::
:func:`csv2rec`
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
delimiter = str(delimiter)
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
if r.ndim != 1:
raise ValueError('rec2csv only operates on 1 dimensional recarrays')
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
if withheader:
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
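# Illustrative usage sketch (added; not part of the original module): rec2csv
# also accepts a writable file handle, so a record array can be rendered to an
# in-memory CSV string. Field names and values are arbitrary examples.
def _example_rec2csv():
    from io import StringIO
    r = np.rec.fromrecords([(1, 10.5), (2, 11.25)], names='id,close')
    fh = StringIO()
    rec2csv(r, fh)          # file handles are used as-is and left open
    return fh.getvalue()    # CSV text with an 'id,close' header row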
def griddata(x, y, z, xi, yi, interp='nn'):
"""Interpolates from a nonuniformly spaced grid to some other
grid.
Fits a surface of the form z = f(`x`, `y`) to the data in the
(usually) nonuniformly spaced vectors (`x`, `y`, `z`), then
interpolates this surface at the points specified by
(`xi`, `yi`) to produce `zi`.
Parameters
----------
x, y, z : 1d array_like
Coordinates of grid points to interpolate from.
xi, yi : 1d or 2d array_like
Coordinates of grid points to interpolate to.
interp : string key from {'nn', 'linear'}
Interpolation algorithm, either 'nn' for natural neighbor, or
'linear' for linear interpolation.
Returns
-------
2d float array
Array of values interpolated at (`xi`, `yi`) points. Array
        will be masked if any of (`xi`, `yi`) are outside the convex
hull of (`x`, `y`).
Notes
-----
If `interp` is 'nn' (the default), uses natural neighbor
interpolation based on Delaunay triangulation. This option is
only available if the mpl_toolkits.natgrid module is installed.
This can be downloaded from https://github.com/matplotlib/natgrid.
The (`xi`, `yi`) grid must be regular and monotonically increasing
in this case.
If `interp` is 'linear', linear interpolation is used via
matplotlib.tri.LinearTriInterpolator.
Instead of using `griddata`, more flexible functionality and other
interpolation options are available using a
matplotlib.tri.Triangulation and a matplotlib.tri.TriInterpolator.
"""
# Check input arguments.
x = np.asanyarray(x, dtype=np.float64)
y = np.asanyarray(y, dtype=np.float64)
z = np.asanyarray(z, dtype=np.float64)
if x.shape != y.shape or x.shape != z.shape or x.ndim != 1:
raise ValueError("x, y and z must be equal-length 1-D arrays")
xi = np.asanyarray(xi, dtype=np.float64)
yi = np.asanyarray(yi, dtype=np.float64)
if xi.ndim != yi.ndim:
raise ValueError("xi and yi must be arrays with the same number of "
"dimensions (1 or 2)")
if xi.ndim == 2 and xi.shape != yi.shape:
raise ValueError("if xi and yi are 2D arrays, they must have the same "
"shape")
if xi.ndim == 1:
xi, yi = np.meshgrid(xi, yi)
if interp == 'nn':
use_nn_interpolation = True
elif interp == 'linear':
use_nn_interpolation = False
else:
raise ValueError("interp keyword must be one of 'linear' (for linear "
"interpolation) or 'nn' (for natural neighbor "
"interpolation). Default is 'nn'.")
# Remove masked points.
mask = np.ma.getmask(z)
if not (mask is np.ma.nomask):
x = x.compress(~mask)
y = y.compress(~mask)
z = z.compressed()
if use_nn_interpolation:
try:
from mpl_toolkits.natgrid import _natgrid
except ImportError:
raise RuntimeError("To use interp='nn' (Natural Neighbor "
"interpolation) in griddata, natgrid must be installed. "
"Either install it from http://sourceforge.net/projects/"
"matplotlib/files/matplotlib-toolkits, or use interp='linear' "
"instead.")
if xi.ndim == 2:
# natgrid expects 1D xi and yi arrays.
xi = xi[0, :]
yi = yi[:, 0]
# Override default natgrid internal parameters.
_natgrid.seti(b'ext', 0)
_natgrid.setr(b'nul', np.nan)
if np.min(np.diff(xi)) < 0 or np.min(np.diff(yi)) < 0:
raise ValueError("Output grid defined by xi,yi must be monotone "
"increasing")
# Allocate array for output (buffer will be overwritten by natgridd)
zi = np.empty((yi.shape[0], xi.shape[0]), np.float64)
# Natgrid requires each array to be contiguous rather than e.g. a view
# that is a non-contiguous slice of another array. Use numpy.require
# to deal with this, which will copy if necessary.
x = np.require(x, requirements=['C'])
y = np.require(y, requirements=['C'])
z = np.require(z, requirements=['C'])
xi = np.require(xi, requirements=['C'])
yi = np.require(yi, requirements=['C'])
_natgrid.natgridd(x, y, z, xi, yi, zi)
# Mask points on grid outside convex hull of input data.
if np.any(np.isnan(zi)):
zi = np.ma.masked_where(np.isnan(zi), zi)
return zi
else:
# Linear interpolation performed using a matplotlib.tri.Triangulation
# and a matplotlib.tri.LinearTriInterpolator.
from .tri import Triangulation, LinearTriInterpolator
triang = Triangulation(x, y)
interpolator = LinearTriInterpolator(triang, z)
return interpolator(xi, yi)
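# Illustrative usage sketch (added; not part of the original module):
# interpolate scattered samples of z = x + y onto a regular grid using the
# 'linear' (triangulation based) backend, which needs no optional natgrid
# dependency. Point counts and grid bounds are arbitrary.
def _example_griddata():
    x = np.random.uniform(0, 1, 200)
    y = np.random.uniform(0, 1, 200)
    z = x + y
    xi = np.linspace(0.1, 0.9, 20)
    yi = np.linspace(0.1, 0.9, 20)
    return griddata(x, y, z, xi, yi, interp='linear')  # 20x20 masked array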
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
class GaussianKDE(object):
"""
Representation of a kernel-density estimate using Gaussian kernels.
Call signature::
kde = GaussianKDE(dataset, bw_method='silverman')
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as only
parameter and return a scalar. If None (default), 'scott' is used.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
dim : int
Number of dimensions.
num_dp : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
"""
# This implementation with minor modification was too good to pass up.
# from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
def __init__(self, dataset, bw_method=None):
self.dataset = np.atleast_2d(dataset)
if not np.array(self.dataset).size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.dim, self.num_dp = np.array(self.dataset).shape
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif (np.isscalar(bw_method) and not
isinstance(bw_method, six.string_types)):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
# Computes the covariance matrix for each Gaussian kernel using
# covariance_factor().
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self.data_covariance = np.atleast_2d(
np.cov(
self.dataset,
rowvar=1,
bias=False))
self.data_inv_cov = np.linalg.inv(self.data_covariance)
self.covariance = self.data_covariance * self.factor ** 2
self.inv_cov = self.data_inv_cov / self.factor ** 2
self.norm_factor = np.sqrt(
np.linalg.det(
2 * np.pi * self.covariance)) * self.num_dp
def scotts_factor(self):
return np.power(self.num_dp, -1. / (self.dim + 4))
def silverman_factor(self):
return np.power(
self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different
than the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
dim, num_m = np.array(points).shape
if dim != self.dim:
msg = "points have dimension %s, dataset has dimension %s" % (
dim, self.dim)
raise ValueError(msg)
result = np.zeros((num_m,), dtype=np.float)
if num_m >= self.num_dp:
# there are more points than data, so loop over data
for i in range(self.num_dp):
diff = self.dataset[:, i, np.newaxis] - points
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result = result + np.exp(-energy)
else:
# loop over points
for i in range(num_m):
diff = self.dataset - points[:, i, np.newaxis]
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result[i] = np.sum(np.exp(-energy), axis=0)
result = result / self.norm_factor
return result
__call__ = evaluate
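# Illustrative usage sketch (added; not part of the original module): fit a
# Gaussian KDE to 1-D samples and evaluate the estimated density on a grid.
# Sample size and bandwidth rule below are arbitrary choices.
def _example_gaussian_kde():
    samples = np.random.randn(500)          # univariate data -> 1-D array
    kde = GaussianKDE(samples, bw_method='silverman')
    grid = np.linspace(-3, 3, 101)
    return kde(grid)                        # density estimate at each grid point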
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
# Make a closed polygon path
poly = Path( verts )
    # Check to see which points are contained within the Path
return [ idx for idx, p in enumerate(points) if poly.contains_point(p) ]
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, e.g.,::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
numpy = ma
else:
numpy = np
xs = numpy.asarray(xs)
ys = numpy.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*numpy.ones(2*Nx)
y = numpy.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
numpy = ma
else:
numpy = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*numpy.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*numpy.ones(Nx)
x = numpy.concatenate( (x, x[::-1]) )
y = numpy.concatenate( (yupper, ylower[::-1]) )
return x,y
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
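# Illustrative usage sketch (added; not part of the original module): find the
# (start, stop) index pairs of the runs of True values in a boolean mask.
def _example_contiguous_regions():
    mask = np.array([False, True, True, False, True])
    return contiguous_regions(mask)   # -> [(1, 3), (4, 5)]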
def cross_from_below(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
below, e.g., the i's where::
x[i-1]<threshold and x[i]>=threshold
Example code::
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, '-o')
ax.axhline(0.5)
ax.axhline(-0.5)
ind = cross_from_below(s, 0.5)
ax.vlines(t[ind], -1, 1)
ind = cross_from_above(s, -0.5)
ax.vlines(t[ind], -1, 1)
plt.show()
.. seealso::
:func:`cross_from_above` and :func:`contiguous_regions`
"""
x = np.asarray(x)
threshold = threshold
ind = np.nonzero( (x[:-1]<threshold) & (x[1:]>=threshold))[0]
if len(ind): return ind+1
else: return ind
def cross_from_above(x, threshold):
"""
    return the indices into *x* where *x* crosses some threshold from
    above, e.g., the i's where::
      x[i-1]>threshold and x[i]<=threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]>=threshold) & (x[1:]<threshold))[0]
if len(ind): return ind+1
else: return ind
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
    like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between a set of successive points in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. The distances between
successive rows is computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
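# Illustrative usage sketch (added; not part of the original module): the
# cumulative arc length along the corners (0, 0) -> (1, 0) -> (1, 1) of a unit
# square is [0, 1, 2].
def _example_path_length():
    X = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
    return path_length(X)   # -> array([0., 1., 2.])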
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
def offset_line(y, yerr):
"""
Offsets an array *y* by +/- an error and returns a tuple (y - err, y + err).
The error term can be:
* A scalar. In this case, the returned tuple is obvious.
* A vector of the same length as *y*. The quantities y +/- err are computed
component-wise.
* A tuple of length 2. In this case, yerr[0] is the error below *y* and
yerr[1] is error above *y*. For example::
from pylab import *
x = linspace(0, 2*pi, num=100, endpoint=True)
y = sin(x)
y_minus, y_plus = mlab.offset_line(y, 0.1)
plot(x, y)
      fill_between(x, y_minus, y2=y_plus)
show()
"""
if cbook.is_numlike(yerr) or (cbook.iterable(yerr) and len(yerr) == len(y)):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = y - yerr[0], y + yerr[1]
else:
raise ValueError("yerr must be scalar, 1xN or 2xN")
return ymin, ymax
|
Reagankm/KnockKnock
|
venv/lib/python3.4/site-packages/matplotlib/mlab.py
|
Python
|
gpl-2.0
| 128,046
|
[
"Gaussian"
] |
de289236a11c26ba41b95347b0a8ef99778ff6e9cf8e9811af0c9b5173152091
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .configurer import SuiteConfigurer
from .filter import Filter, EmptySuiteRemover
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .metadata import Metadata
from .modelobject import ModelObject
from .tagsetter import TagSetter
from .testcase import TestCase, TestCases
class TestSuite(ModelObject):
"""Base model for single suite.
Extended by :class:`robot.running.model.TestSuite` and
:class:`robot.result.model.TestSuite`.
"""
__slots__ = ['parent', 'source', '_name', 'doc', '_my_visitors']
test_class = TestCase #: Internal usage only.
keyword_class = Keyword #: Internal usage only.
def __init__(self, name='', doc='', metadata=None, source=None):
self.parent = None #: Parent suite. ``None`` with the root suite.
self._name = name
self.doc = doc #: Test suite documentation.
self.metadata = metadata
self.source = source #: Path to the source file or directory.
self.suites = None
self.tests = None
self.keywords = None
self._my_visitors = []
@property
def _visitors(self):
parent_visitors = self.parent._visitors if self.parent else []
return self._my_visitors + parent_visitors
@property
def name(self):
"""Test suite name. If not set, constructed from child suite names."""
return self._name or ' & '.join(s.name for s in self.suites)
@name.setter
def name(self, name):
self._name = name
@property
def longname(self):
"""Suite name prefixed with the long name of the parent suite."""
if not self.parent:
return self.name
return '%s.%s' % (self.parent.longname, self.name)
@setter
def metadata(self, metadata):
"""Free test suite metadata as a dictionary."""
return Metadata(metadata)
@setter
def suites(self, suites):
"""Child suites as a :class:`~.TestSuites` object."""
return TestSuites(self.__class__, self, suites)
@setter
def tests(self, tests):
"""Tests as a :class:`~.TestCases` object."""
return TestCases(self.test_class, self, tests)
@setter
def keywords(self, keywords):
"""Suite setup and teardown as a :class:`~.Keywords` object."""
return Keywords(self.keyword_class, self, keywords)
@property
def id(self):
"""An automatically generated unique id.
The root suite has id ``s1``, its child suites have ids ``s1-s1``,
``s1-s2``, ..., their child suites get ids ``s1-s1-s1``, ``s1-s1-s2``,
..., ``s1-s2-s1``, ..., and so on.
The first test in a suite has an id like ``s1-t1``, the second has an
id ``s1-t2``, and so on. Similarly keywords in suites (setup/teardown)
and in tests get ids like ``s1-k1``, ``s1-t1-k1``, and ``s1-s4-t2-k5``.
"""
if not self.parent:
return 's1'
return '%s-s%d' % (self.parent.id, self.parent.suites.index(self)+1)
@property
def test_count(self):
"""Number of the tests in this suite, recursively."""
return len(self.tests) + sum(suite.test_count for suite in self.suites)
def set_tags(self, add=None, remove=None, persist=False):
"""Add and/or remove specified tags to the tests in this suite.
:param add: Tags to add as a list or, if adding only one,
as a single string.
:param remove: Tags to remove as a list or as a single string.
Can be given as patterns where ``*`` and ``?`` work as wildcards.
:param persist: Add/remove specified tags also to new tests added
to this suite in the future.
"""
setter = TagSetter(add, remove)
self.visit(setter)
if persist:
self._my_visitors.append(setter)
def filter(self, included_suites=None, included_tests=None,
included_tags=None, excluded_tags=None):
"""Select test cases and remove others from this suite.
Parameters have the same semantics as ``--suite``, ``--test``,
``--include``, and ``--exclude`` command line options. All of them
can be given as a list of strings, or when selecting only one, as
a single string.
Child suites that contain no tests after filtering are automatically
removed.
Example::
suite.filter(included_tests=['Test 1', '* Example'],
included_tags='priority-1')
"""
self.visit(Filter(included_suites, included_tests,
included_tags, excluded_tags))
def configure(self, **options):
"""A shortcut to configure a suite using one method call.
:param options: Passed to
:class:`~robot.model.configurer.SuiteConfigurer` that will then
set suite attributes, call :meth:`filter`, etc. as needed.
"""
self.visit(SuiteConfigurer(**options))
def remove_empty_suites(self):
"""Removes all child suites not containing any tests, recursively."""
self.visit(EmptySuiteRemover())
def visit(self, visitor):
""":mod:`Visitor interface <robot.model.visitor>` entry-point."""
visitor.visit_suite(self)
class TestSuites(ItemList):
__slots__ = []
def __init__(self, suite_class=TestSuite, parent=None, suites=None):
ItemList.__init__(self, suite_class, {'parent': parent}, suites)
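# Usage sketch (added illustration, not part of the original module; the
# ``create`` helper is assumed here to be provided by ItemList for building
# items in place):
#
#   # suite = TestSuite(name='Root Suite')
#   # child = suite.suites.create(name='Child Suite')
#   # child.tests.create(name='Example Test')
#   # suite.test_count  # -> 1
#   # child.id  # -> 's1-s1'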
|
snyderr/robotframework
|
src/robot/model/testsuite.py
|
Python
|
apache-2.0
| 6,114
|
[
"VisIt"
] |
06cb69c2ca4b6fb5a1caa2ee90babb7b4d2f38e9408b88ad025041d5ce4ea9ab
|
from .. import nengo as nengo
from ..nengo.connection import gen_transform
## This example demonstrates how to create a neuronal ensemble that will combine two 1-D
## inputs into one 2-D representation.
##
## Network diagram:
##
## [Input A] ---> (A) --.
## v
## (C)
## ^
## [Input B] ---> (B) --'
##
##
## Network behaviour:
## A = Input_A
## B = Input_B
## C = [A,B]
##
# Create the nengo model
model = nengo.Model('Combining')
# Create the model inputs
model.make_node('Input A', [0]) # Create a controllable input function
# with a starting value of 0
model.make_node('Input B', [0]) # Create another controllable input
# function with a starting value of 0
# Create the neuronal ensembles
model.make_ensemble('A', 100, 1) # Make a population with 100 neurons, 1 dimension
model.make_ensemble('B', 100, 1) # Make a population with 100 neurons, 1 dimension
model.make_ensemble('C', 100, 2, # Make a population with 100 neurons, 2 dimensions,
radius = 1.5) # and set a larger radius (so [1,1] input still
# fits within the circle of that radius)
# Create the connections within the model
model.connect('Input A', 'A') # Connect the inputs to the appropriate neuron
model.connect('Input B', 'B') # populations (default connection is identity)
model.connect('A', 'C', gen_transform(index_post = 0))
# Connect with A to the first dimension of C
model.connect('B', 'C', gen_transform(index_post = 1))
# Connect with B to the second dimension of C
# Build the model
model.build()
# Run the model
model.run(1) # Run the model for 1 second
|
jaberg/nengo
|
examples/combining.py
|
Python
|
mit
| 1,970
|
[
"NEURON"
] |
33aa9fd75f1d286fb0fd796158d912ab03b6eb1cef6d88fcb9bc19c9ce820214
|
from multiasecalc.lammps import unitconversion
from ase.optimize.optimize import Dynamics
from ase.io.trajectory import PickleTrajectory
from ase.md.logger import MDLogger
from ase import units
from random import random
import numpy as np
class LAMMPSOptimizer(Dynamics):
""" Geometry optimizer for LAMMPS. works only with LAMMPS calculators """
def __init__(self, atoms, restart=None, logfile=None, trajectory=None, algorithm='cg', relax_cell=False):
Dynamics.__init__(self, atoms, logfile, trajectory)
self.algorithm = algorithm
self.relax_cell = relax_cell
def run(self, fmax=0.001, steps=1e8):
self.atoms.calc.minimize(self.atoms, ftol=fmax, maxeval=steps, min_style=self.algorithm, relax_cell=self.relax_cell)
class LAMMPSMolecularDynamics(Dynamics):
""" Base class for molecular dynamics with LAMMPS. Requires a LAMMPS calculator. """
def __init__(self, atoms, timestep, integrator='verlet', trajectory=None,
traj_interval=1000, logfile=None, loginterval=100):
Dynamics.__init__(self, atoms, None, None)
self.dt = timestep
if integrator == 'verlet':
self.run_style = 'verlet'
else:
raise RuntimeError('Unknown integrator: %s' % integrator)
if trajectory:
if isinstance(trajectory, str):
trajectory = PickleTrajectory(trajectory, 'w', atoms)
self.attach(trajectory, interval=traj_interval)
if logfile:
self.attach(MDLogger(dyn=self, atoms=atoms, logfile=logfile),
interval=loginterval)
self.fix = None
self.cell_relaxed = False
def run(self, steps=50, constraints=[]):
self.nsteps = 0
fix = 'all '+self.fix
calc = self.atoms.calc
it = self.run_iterator(steps)
calc.molecular_dynamics(self.atoms, self.dt, fix, it, self.cell_relaxed, steps, constraints)
def run_iterator(self, steps):
cur_step = 0
for target_step in range(steps+1):
for function, interval, args, kwargs in self.observers:
if target_step % interval == 0:
if target_step > cur_step:
yield target_step - cur_step
cur_step = target_step
function(*args, **kwargs)
if cur_step < steps:
yield steps - cur_step
def get_time(self):
return self.nsteps * self.dt
class LAMMPS_NVE(LAMMPSMolecularDynamics):
""" Microcanonical ensemble """
def __init__(self, atoms, timestep, **kwargs):
LAMMPSMolecularDynamics.__init__(self, atoms, timestep, **kwargs)
self.fix = 'nve'
class LAMMPS_NVT(LAMMPSMolecularDynamics):
""" Constant temperature calculations with Nose-Hoover or Berendsen """
def __init__(self, atoms, timestep, temperature, t_damp=100*units.fs,
thermostat='Nose-Hoover', ramp_to_temp=None, **kwargs):
LAMMPSMolecularDynamics.__init__(self, atoms, timestep, **kwargs)
if thermostat == 'Nose-Hoover':
cmd = 'nvt temp'
elif thermostat == 'Berendsen':
cmd = 'temp/berendsen'
else:
raise RuntimeError('Unknown thermostat: %s' % thermostat)
t_damp = atoms.calc.from_ase_units(t_damp, 'time')
if not ramp_to_temp: ramp_to_temp = temperature
self.fix = '%s %f %f %f' %(cmd, temperature, ramp_to_temp, t_damp)
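# Rough usage sketch (added illustration; assumes ``atoms`` already carries a
# configured multiasecalc.lammps calculator, values are placeholders):
#
#   # dyn = LAMMPS_NVT(atoms, timestep=1.0*units.fs, temperature=300.0,
#   #                  trajectory='nvt.traj', logfile='nvt.log')
#   # dyn.run(steps=1000)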
class LAMMPS_NPT(LAMMPSMolecularDynamics):
""" Constant temperature and pressure calculations with Nose-Hoover """
def __init__(self, atoms, timestep, temperature, externalstress, isotropic=True, t_damp=100*units.fs, p_damp=1000*units.fs, ramp_to_temp=None, **kwargs):
LAMMPSMolecularDynamics.__init__(self, atoms, timestep, **kwargs)
pressure = atoms.calc.from_ase_units(externalstress, 'pressure')
t_damp = atoms.calc.from_ase_units(t_damp, 'time')
p_damp = atoms.calc.from_ase_units(p_damp, 'time')
if not ramp_to_temp: ramp_to_temp = temperature
if hasattr(pressure, 'shape'):
px, pxy, pxz = pressure[0,:]
py, pyz = pressure[1,1:]
pz = pressure[2,2]
p_diags = [px, py, pz]
args = ' '.join(['%s %f %f %f' % ('xyz'[i], p_diags[i], p_diags[i], p_damp) for i in range(3) if atoms.pbc[i]])
if atoms.pbc[0] and atoms.pbc[1]:
args += ' xy %f %f %f' % (pxy, pxy, p_damp)
if atoms.pbc[1] and atoms.pbc[2]:
args += ' yz %f %f %f' % (pyz, pyz, p_damp)
if atoms.pbc[0] and atoms.pbc[2]:
args += ' xz %f %f %f' % (pxz, pxz, p_damp)
else:
pvalues = '%f %f %f' % (pressure, pressure, p_damp)
if atoms.pbc.all():
if isotropic:
coupling = 'iso'
elif (np.dot(atoms.cell, atoms.cell) == atoms.cell**2).all():
# orthogonal cell
coupling = 'aniso'
else:
coupling = 'tri'
args = '%s %s' % (coupling, pvalues)
else:
args = ' '.join(['%s %s' % ('xyz'[i], pvalues) for i in range(3) if atoms.pbc[i]])
self.fix = 'npt temp %f %f %f %s' %(temperature, ramp_to_temp, t_damp, args)
self.cell_relaxed = True
class SimpleConstraint:
def __init__(self, indices):
self.indices = indices
def get_commands(self, atoms):
fix = self.get_fix(atoms)
id = '%s%s' % (self.__class__.__name__, abs(hash(tuple(self.indices))))
groupname = 'group%s' % id
fixname = 'fix%s' % id
cmds = []
indices_str = ' '.join([str(i+1) for i in self.indices])
cmds.append('group %s id %s' % (groupname, indices_str))
cmds.append('fix %s %s %s' % (fixname, groupname, fix))
return cmds
def get_fix(self, atoms):
raise NotImplementedError()
class Spring(SimpleConstraint):
def __init__(self, indices, point, spring_constant, R0=0.0):
SimpleConstraint.__init__(self, indices)
self.point = point
self.K = spring_constant
self.R0 = R0
def get_fix(self, atoms):
K = atoms.calc.from_ase_units(self.K, 'force')
x, y, z = atoms.calc.prism.vector_to_lammps(self.point)
return 'spring tether %f %f %f %f %f' % (K, x, y, z, self.R0)
class AddForce(SimpleConstraint):
def __init__(self, indices, total_force):
SimpleConstraint.__init__(self, indices)
self.total_force = total_force
def get_fix(self, atoms):
force = self.total_force / len(self.indices)
force = atoms.calc.prism.vector_to_lammps(force)
fx, fy, fz = atoms.calc.from_ase_units(force, 'force')
return 'addforce %f %f %f' % (fx, fy, fz)
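# Constraint sketch (added illustration with hypothetical indices/values,
# showing how SimpleConstraint subclasses are passed to run()):
#
#   # tether = Spring(indices=[0, 1, 2], point=[0., 0., 0.], spring_constant=1.0)
#   # push = AddForce(indices=[3, 4], total_force=np.array([0., 0., 0.1]))
#   # dyn.run(steps=500, constraints=[tether, push])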
class LJWall:
def __init__(self, face, epsilon, sigma, wall_offset=None, final_wall_offset=None, mixing='arithmetic'):
self.face = face
self.epsilon = epsilon
self.sigma = sigma
self.offset = wall_offset
self.final_offset = final_wall_offset
self.mixing = mixing
self.commands = []
self.ngroups = 0
self.nfixes = 0
self.id = '%s%s' % (self.__class__.__name__, abs(hash(epsilon + sigma) + hash(face)))
#if 'hi' in face:
# self.offset = -abs(self.offset)
def get_commands(self, atoms):
ffdata = atoms.calc.ff_data
if self.final_offset != None:
rampname = 'ramp%s' % self.id
self.commands.append('variable %s equal ramp(%f,%f)' % (rampname, self.offset, self.final_offset))
coord = 'v_%s' % rampname
elif self.offset != None:
coord = '%f' % self.offset
else:
coord = 'EDGE'
for tp in atoms.calc.data.atom_typeorder:
actual_type = ffdata.get_actual_type('atom', tp)
eps, sig = ffdata.get_params('atom', actual_type)['Pair Coeffs']
mixeps = np.sqrt(self.epsilon*eps)
if self.mixing == 'arithmetic':
mixsig = (self.sigma+sig)/2
elif self.mixing == 'geometric':
mixsig = np.sqrt(self.sigma*sig)
else:
raise RuntimeError('Invalid mixing type: %s' % self.mixing)
typeid = atoms.calc.data.atom_typeorder.index(tp) + 1
groupname = self.create_group_by_type(typeid)
cutoff = 10.0
fixstr = 'wall/lj126 %s %s %f %f %f units box pbc yes' % (self.face, coord, mixeps, mixsig, cutoff)
self.create_fix(groupname, fixstr)
return self.commands
def create_group_by_type(self, typeid):
groupname = 'group%s%s' % (self.id, typeid)
self.commands.append('group %s type %i' % (groupname, typeid))
self.ngroups += 1
return groupname
def create_fix(self, groupname, fixstr):
fixname = 'fix%s%i' % (self.id, self.nfixes)
self.commands.append('fix %s %s %s' % (fixname, groupname, fixstr))
self.nfixes += 1
return fixname
|
csmm/multiase
|
multiasecalc/lammps/dynamics.py
|
Python
|
gpl-2.0
| 7,937
|
[
"ASE",
"LAMMPS"
] |
85f65c9b7d7d6acfac34a9174000b319bc7ac363020801bb3b93f93d6c460361
|
'''
Created on Aug 5, 2014
@author: gearsad
'''
# To allow the project to be run from outside Eclipse PyDev
import sys
sys.path.append('../')
import vtk
from math import cos, sin
from scene import Terrain
from scene import Bot
from scene import Axes
from scene import LIDAR
from scene import InteractorMapUser
from scene import Interactor1stPersonUser
from scene import Interactor3rdPerson
from scene import Interactor1stPersonVuzix
def __3DRenderingLoop(obj, event):
'''
Main loop for rendering at a constant ~30Hz
'''
iren = obj
iren.GetRenderWindow().Render()
if __name__ == '__main__':
renderWindow = vtk.vtkRenderWindow()
# A renderer and render window
renderers = []
renderers.append(vtk.vtkRenderer())
renderers.append(vtk.vtkRenderer())
renderWindow = vtk.vtkRenderWindow()
for renderer in renderers:
renderWindow.AddRenderer(renderer)
renderers[0].SetViewport(0, 0, 0.5, 1)
renderers[1].SetViewport(0.5, 0, 1, 1)
# Let's put in the other screen
renderWindow.SetSize(1600, 1024)
# renderWindow.SetPosition(1600,0)
# An interactor
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
# Create our new scene objects...
terrain = Terrain.Terrain(renderers, 100)
# Initialize a set of test bots
numBots = 8
bots = []
for i in xrange(0, numBots):
bot = Bot.Bot(renderers)
# Put the bot in a cool location
location = [10 * cos(i / float(numBots) * 6.242), 0, 10 * sin(i / float(numBots) * 6.242)]
bot.SetPositionVec3(location)
# Make them all look outward
yRot = 90.0 - i / float(numBots) * 360.0
bot.SetOrientationVec3([0, yRot, 0])
bots.append(bot)
# axes = Axes.Axes(renderer)
# Render an image (lights and cameras are created automatically)
renderWindow.Render()
#FORCE the cameras to a shared, reasonable position
for renderer in renderers:
camera = renderer.GetActiveCamera()
camera.SetPosition([20, 20, 20])
camera.SetFocalPoint([0, 0, 0])
# Sign up to receive TimerEvent for the timed rendering loop
renderObserverId = renderWindowInteractor.AddObserver('TimerEvent', __3DRenderingLoop)
__3DViewLoopTimerId = renderWindowInteractor.CreateRepeatingTimer(30);
# Set up the custom style for your camera interactor
# Uncomment one of the following below to select it
#interactorStyle = InteractorMapUser.InteractorMapUser(renderer, renderWindowInteractor)
#interactorStyle = Interactor1stPersonUser.Interactor1stPersonUser(renderer, renderWindowInteractor)
#interactorStyle = Interactor3rdPerson.Interactor3rdPerson(renderer, renderWindowInteractor, bots[1], [0, 7, -10])
interactorStyle = Interactor1stPersonVuzix.Interactor1stPersonVuzix(renderers, renderWindowInteractor)
# Now set it as the interactor style for the interactor
renderWindowInteractor.SetInteractorStyle(interactorStyle)
interactorStyle.EnabledOn()
# Begin mouse interaction
renderWindowInteractor.Start()
renderWindowInteractor.Initialize()
# Once done, remove the timer to clean up just to be neat
renderWindowInteractor.DestroyTimer(__3DViewLoopTimerId)
renderWindowInteractor.RemoveObserver(renderObserverId)
pass
|
GearsAD/semisorted_arnerve
|
sandbox/bot_vis_platform_oculus/bot_vis_platform/bot_vis_main.py
|
Python
|
mit
| 3,432
|
[
"VTK"
] |
b4ef0995167be55e8f5f3a8b3f69a2dfec29518870024fc8722a879cc8833973
|
"""siegert.py: Function calculating the firing rates of leaky
integrate-and-fire neurons given their parameter and mean and variance
of the input. Rates for delta-shaped PSCs follow Brunel & Hakim 1999.
The rate of a neuron with synaptic filtering with time constant tau_s follows
Fourcaud & Brunel 2002.
Authors: Moritz Helias, Jannis Schuecker, Hannah Bos
"""
from scipy.integrate import quad
from scipy.special import erf
from scipy.special import zetac
import numpy as np
import math
"""
Variables used in this module:
tau_m: membrane time constant
tau_r: refractory time constant
V_th: threshold
V_r: reset potential
mu: mean input
sigma: std of equivalent GWN input
"""
def nu_0(tau_m, tau_r, V_th, V_r, mu, sigma):
""" Calculates stationary firing rates for delta shaped PSCs."""
if mu <= V_th + (0.95 * abs(V_th) - abs(V_th)):
return siegert1(tau_m, tau_r, V_th, V_r, mu, sigma)
else:
return siegert2(tau_m, tau_r, V_th, V_r, mu, sigma)
def nu0_fb(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma):
alpha = np.sqrt(2)*abs(zetac(0.5)+1)
# effective threshold
V_th1 = V_th + sigma*alpha/2.*np.sqrt(tau_s/tau_m)
# effective reset
V_r1 = V_r + sigma*alpha/2.*np.sqrt(tau_s/tau_m)
# use standard Siegert with modified threshold and reset
return nu_0(tau_m, tau_r, V_th1, V_r1, mu, sigma)
# stationary firing rate of neuron with synaptic low-pass filter
# of time constant tau_s driven by Gaussian noise with mean mu and
# standard deviation sigma, from Fourcaud & Brunel 2002
def nu0_fb433(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma, switch_fb=-7.):
"""Calculates stationary firing rates for exponential PSCs using
expression with Taylor expansion in k = sqrt(tau_s/tau_m) (Eq. 433
in Fourcaud & Brunel 2002)
"""
alpha = np.sqrt(2.) * abs(zetac(0.5) + 1)
x_th = np.sqrt(2.) * (V_th - mu) / sigma
x_r = np.sqrt(2.) * (V_r - mu) / sigma
if x_r < switch_fb:
return nu0_fb(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma)
# preventing overflow in np.exp in Phi(s)
if x_th > 20.0 / np.sqrt(2.):
result = nu_0(tau_m, tau_r, V_th, V_r, mu, sigma)
else:
r = nu_0(tau_m, tau_r, V_th, V_r, mu, sigma)
dPhi = Phi(x_th) - Phi(x_r)
result = r - np.sqrt(tau_s / tau_m) * alpha / \
(tau_m * np.sqrt(2)) * dPhi * (r * tau_m)**2
if math.isnan(result):
print mu, sigma, x_th, x_r
return result
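# Minimal numerical sketch (added illustration with assumed LIF parameters,
# not taken from the papers; with times in ms the returned rate is in 1/ms):
#
#   # rate = nu0_fb433(tau_m=20., tau_s=0.5, tau_r=2.,
#   #                  V_th=20., V_r=0., mu=10., sigma=5.)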
def Phi(s):
return np.sqrt(np.pi / 2.) * (np.exp(s**2 / 2.) * (1 + erf(s / np.sqrt(2))))
def Phi_prime_mu(s, sigma):
return -np.sqrt(np.pi) / sigma * (s * np.exp(s**2 / 2.) * (1 + erf(s / np.sqrt(2)))
+ np.sqrt(2) / np.sqrt(np.pi))
def siegert1(tau_m, tau_r, V_th, V_r, mu, sigma):
# for mu < V_th
y_th = (V_th - mu) / sigma
y_r = (V_r - mu) / sigma
def integrand(u):
if u == 0:
return np.exp(-y_th**2) * 2 * (y_th - y_r)
else:
return np.exp(-(u - y_th)**2) * (1.0 - np.exp(2 * (y_r - y_th) * u)) / u
lower_bound = y_th
err_dn = 1.0
while err_dn > 1e-12 and lower_bound > 1e-16:
err_dn = integrand(lower_bound)
if err_dn > 1e-12:
lower_bound /= 2
upper_bound = y_th
err_up = 1.0
while err_up > 1e-12:
err_up = integrand(upper_bound)
if err_up > 1e-12:
upper_bound *= 2
# check preventing overflow
if y_th >= 20:
out = 0.
if y_th < 20:
out = 1.0 / (tau_r + np.exp(y_th**2)
* quad(integrand, lower_bound, upper_bound)[0] * tau_m)
return out
def siegert2(tau_m, tau_r, V_th, V_r, mu, sigma):
# for mu > V_th
y_th = (V_th - mu) / sigma
y_r = (V_r - mu) / sigma
def integrand(u):
if u == 0:
return 2 * (y_th - y_r)
else:
return (np.exp(2 * y_th * u - u**2) - np.exp(2 * y_r * u - u**2)) / u
upper_bound = 1.0
err = 1.0
while err > 1e-12:
err = integrand(upper_bound)
upper_bound *= 2
return 1.0 / (tau_r + quad(integrand, 0.0, upper_bound)[0] * tau_m)
def d_nu_d_mu_fb433(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma):
alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
x_th = np.sqrt(2) * (V_th - mu) / sigma
x_r = np.sqrt(2) * (V_r - mu) / sigma
integral = 1. / (nu_0(tau_m, tau_r, V_th, V_r, mu, sigma) * tau_m)
prefactor = np.sqrt(tau_s / tau_m) * alpha / (tau_m * np.sqrt(2))
dnudmu = d_nu_d_mu(tau_m, tau_r, V_th, V_r, mu, sigma)
dPhi_prime = Phi_prime_mu(x_th, sigma) - Phi_prime_mu(x_r, sigma)
dPhi = Phi(x_th) - Phi(x_r)
phi = dPhi_prime * integral + (2 * np.sqrt(2) / sigma) * dPhi**2
return dnudmu - prefactor * phi / integral**3
def d_nu_d_mu(tau_m, tau_r, V_th, V_r, mu, sigma):
y_th = (V_th - mu)/sigma
y_r = (V_r - mu)/sigma
nu0 = nu_0(tau_m, tau_r, V_th, V_r, mu, sigma)
return np.sqrt(np.pi) * tau_m * nu0**2 / sigma * (np.exp(y_th**2) * (1 + erf(y_th)) - np.exp(y_r**2) * (1 + erf(y_r)))
|
INM-6/neural_network_meanfield
|
siegert.py
|
Python
|
gpl-3.0
| 5,026
|
[
"Gaussian",
"NEURON"
] |
36e8ebbf063ba7bf72026dd77a338621c418223eb79bbc1067e530912725fa07
|
from espressomd import System, shapes, electrokinetics
import sys
system = System(box_l = [10, 10, 10])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.cell_system.skin = 0.4
system.time_step = 0.1
ek = electrokinetics.Electrokinetics(
lb_density=1, friction=1, agrid=1, viscosity=1, T=1, prefactor=1)
pos = electrokinetics.Species(
density=0.05, D=0.1, valency=1, ext_force=[0, 0, 1.])
neg = electrokinetics.Species(
density=0.05, D=0.1, valency=-1, ext_force=[0, 0, -1.])
ek.add_species(pos)
ek.add_species(neg)
system.actors.add(ek)
print(ek.get_params())
print(pos.get_params())
print(neg.get_params())
print(pos[5, 5, 5].density)
ek_wall_left = electrokinetics.EKBoundary(
shape=shapes.Wall(dist=1, normal=[1, 0, 0]), charge_density=-0.01)
ek_wall_right = electrokinetics.EKBoundary(
shape=shapes.Wall(dist=-9, normal=[-1, 0, 0]), charge_density=0.01)
system.ekboundaries.add(ek_wall_left)
system.ekboundaries.add(ek_wall_right)
for i in range(1000):
system.integrator.run(100)
sys.stdout.write("\rIntegrating: %03i" % i)
sys.stdout.flush()
pos.print_vtk_density("ek/pos_dens_%i.vtk" % i)
neg.print_vtk_density("ek/neg_dens_%i.vtk" % i)
pos.print_vtk_flux("ek/pos_flux_%i.vtk" % i)
neg.print_vtk_flux("ek/neg_flux_%i.vtk" % i)
ek.print_vtk_velocity("ek/ekv_%i.vtk" % i)
ek.print_vtk_boundary("ek/ekb_%i.vtk" % i)
|
KonradBreitsprecher/espresso
|
samples/ekboundaries.py
|
Python
|
gpl-3.0
| 1,441
|
[
"VTK"
] |
b4e9504edb7b23a5c5f40a1104a773679393c6e8b58d517caa88f2d08058ff9f
|
##===============================================================================
# This file is part of TEMPy.
# It describes the implementation of the Assembly class for the purpose of fitting multiple components into the assembly map
#
# TEMPy is a software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2010-2014 TEMPy Inventors and Birkbeck College University of London.
# The TEMPy Inventors are: Maya Topf, Daven Vasishtan,
# Arun Prasad Pandurangan, Irene Farabella, Agnel-Praveen Joseph,
# Harpal Sahota
#
#
# TEMPy is available under Public Licence.
#
# Please cite your use of TEMPy in published work:
#
# Vasishtan D, Topf M. (2011) J Struct Biol 174:333-343. Scoring functions for cryoEM density fitting.
# Pandurangan AP, Vasishtan D, Topf M. (2015) Structure 23:2365-2376. GAMMA-TEMPy: Simultaneous fitting of components in 3D-EM Maps of their assembly using genetic algorithm.
#===============================================================================
from TEMPy.ProtRep_Biopy import *
from TEMPy.StructureBlurrer import StructureBlurrer
#from EMMap import *
#from MapParser import *
#from PDBParser import *
#from StructureBlurrer import *
#from Vector import *
#from VQ import *
#from Quaternion import *
class Assembly:
"""
A class to represent a multi-subunit component and its corresponding density map.
"""
def __init__(self, structList):
"""
A constructor to initialise the assembly object.
Arguments:
*structList*
A list of BioPy_Structure objects.
"""
self.structList = structList
self.initMapList = []
self.mapList = []
def build_maps(self, resolution, template_map, sig_coeff=0.356):
"""
Build list of maps corresponding to the protein components in the structList.
Arguments:
*resolution*
Desired resolution of the density map in Angstrom units.
*template_map*
A map object that will be used as the template to build the maps for the individual components. Usually the input map used for the assembly fitting.
*sig_coeff*
the sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default value is 0.356.
"""
sb = StructureBlurrer()
for x in self.structList:
self.mapList.append(sb.gaussian_blur(x, resolution, template_map, sig_coeff))
self.initMapList.append(self.mapList[-1].copy())
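# Usage sketch (added illustration; ``struct1``/``struct2`` stand for
# BioPy_Structure objects and ``em_map`` for the experimental map):
#
#   # assembly = Assembly([struct1, struct2])
#   # assembly.build_maps(resolution=10.0, template_map=em_map)
#   # assembly.randomise_structs_and_maps(max_trans=5.0, max_rot=30.0)
#   # combined = assembly.combine_maps()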
def randomise_structs(self, max_trans, max_rot, v_grain=30, rad=False):
"""
Randomise the position and orientation of the protein components in the structList.
Arguments:
*max_trans*
Maximum translation permitted
*max_rot*
Maximum rotation permitted (in degree if rad=False)
*v_grain*
Graining level for the generation of random vectors (default=30)
"""
for x in self.structList:
x.randomise_position(max_trans, max_rot, v_grain, rad)
def randomise_structs_and_maps(self, max_trans, max_rot, v_grain=30, rad=False):
"""
Randomise the position and orientation of the protein components and its corresponding map objects.
Arguments:
*max_trans*
Maximum translation permitted
*max_rot*
Maximum rotation permitted (in degree if rad=False)
*v_grain*
Graining level for the generation of random vectors (default=30)
"""
if len(self.mapList) != len(self.structList):
print('Maps not built yet')
else:
for x in range(len(self.structList)):
com = self.structList[x].CoM.copy()
rx,ry,rz,ra,tx,ty,tz = self.structList[x].randomise_position(max_trans, max_rot, v_grain, rad, verbose=True)
self.mapList[x] = self.mapList[x].rotate_by_axis_angle(rx, ry, rz, ra, com)
self.mapList[x] = self.mapList[x].translate(tx,ty,tz)
def reset_structs(self):
"""
Translate the list of structure objects back to their initial positions.
"""
for x in self.structList:
x.reset_position()
def reset_maps(self):
"""
Undo all the transformations applied to the list of map objects and restore them to their original state.
"""
for x in range(len(self.mapList)):
self.mapList[x] = self.initMapList[x].copy()
def reset_all(self):
"""
Reset the map and structure objects to their initial state.
"""
self.reset_maps()
self.reset_structs()
def move_map_and_prot_by_aa(self, index, rx, ry, rz, ra, tx, ty, tz):
"""
Translate and rotate the structure and map objects in the assembly around its centre given an axis and angle.
Arguments:
*index*
Index of the structure and map list.
*rx,ry,rz*
Axis to rotate about, i.e. rx,ry,rz = 0,0,1 rotates the structure and map in the xy-plane.
*ra*
Angle (in degrees) to rotate map.
*tx,ty,tz*
Distance in Angstroms to move structure and map in respective x, y, and z directions.
"""
com = self.structList[index].CoM.copy()
self.structList[index].rotate_by_axis_angle(rx, ry, rz, ra, tx, ty, tz)
self.mapList[index] = self.mapList[index].rotate_by_axis_angle(rx, ry, rz, ra, com)
self.mapList[index] = self.mapList[index].translate(tx,ty,tz)
def move_map_and_prot_by_euler(self, index, rx, ry, rz, tx, ty, tz):
"""
Translate and rotate the structure and map objects in the assembly around its centre using Euler angles.
Arguments:
*index*
Index of the structure and map list.
*rx,ry,rz*
Euler angles used to rotate the structure and map.
*tx,ty,tz*
Distance in Angstroms to move structure and map in respective x, y, and z directions.
"""
com = self.structList[index].CoM.copy()
self.structList[index].rotate_by_euler(rx, ry, rz, 0, 0, 0)
self.structList[index].translate(tx,ty,tz)
self.mapList[index] = self.mapList[index].rotate_by_euler(rx, ry, rz, com)
self.mapList[index] = self.mapList[index].translate(tx,ty,tz)
def move_map_and_prot_by_mat(self, index, mat, tx, ty, tz):
"""
Translate and rotate the structure and map objects around the pivot given by the CoM, using a translation vector and a rotation matrix respectively.
Arguments:
*mat*
3x3 matrix used to rotate structure and map objects.
*tx,ty,tz*
Distance in Angstroms to move structure and map in respective x, y, and z directions.
"""
com = self.structList[index].CoM.copy()
self.structList[index].rotate_by_mat(mat)
self.structList[index].translate(tx,ty,tz)
self.mapList[index] = self.mapList[index].rotate_by_matrix(mat, com)
self.mapList[index] = self.mapList[index].translate(tx,ty,tz)
def move_map_and_prot_by_quat(self, index, tx, ty, tz, q_param, mat):
"""
Translate the structure object using a translation vector and rotate it using a quaternion object.
Translate and rotate the map object around the pivot given by the CoM, using a translation vector and a rotation matrix respectively.
Arguments:
*index*
Index of the structure and map list.
*tx,ty,tz*
Distance in Angstroms to move structure and map in respective x, y, and z directions.
*q_param*
Is a list of type [w, x, y, z] which represents a quaternion vector used for rotation
*mat*
3x3 matrix used to rotate structure and map objects.
"""
com = self.structList[index].CoM.copy()
self.structList[index].rotate_by_quaternion(q_param)
self.structList[index].translate(tx,ty,tz)
self.mapList[index] = self.mapList[index].rotate_by_matrix(mat, com)
self.mapList[index] = self.mapList[index].translate(tx,ty,tz)
def combine_structs(self):
"""
Used to combine the list of structure objects into a single structure object
"""
if len(self.structList)>1:
return self.structList[0].combine_structures(self.structList[1:])
elif len(self.structList)==1:
return self.structList[0]
else:
print('No structures found')
def combine_maps(self):
"""
Used to combine the list of map objects into a single map object
"""
if len(self.mapList)>1:
newMap = self.mapList[0].copy()
for x in self.mapList[1:]:
newMap.fullMap += x.fullMap
return newMap
elif len(self.structList)==1:
return self.mapList[0].copy()
else:
print('No maps found')
def make_VQ_points(self, threshold, noOfPoints, lap_fil, epochs=300):
"""
Cluster the density maps in the assembly object into n points using the vector quantisation algorithm.
Arguments:
*emmap*
Map (to be clustered) instance.
*threshold*
voxels with density above this value are used in the VQ run.
*noOfPoints*
Number of Vector quantisation points to output.
*lap_fil*
True if you want to Laplacian filter the map first, False otherwise. Note that filtering the map changes the density values of the map, which is relevant for the threshold parameter.
*epochs*
Number of iterations to run the Vector quantisation algorithm. Default is set to 300
Return:
A list of vector objects containing the vector quantisation points
"""
vq = []
if len(self.mapList) > 0:
for s in range(len(self.mapList)):
vq.append(get_VQ_points(self.mapList[s], threshold, noOfPoints[s], epochs, None, lap_fil))
return vq
def write_all_to_files(self, templateName):
"""
Write all the structure and map objects separately to PDB and MRC formatted files respectively.
Arguments:
*templateName*
A string representing the prefix of the file name
"""
for x in range(len(self.structList)):
self.structList[x].write_to_PDB(templateName+str(x)+'.pdb')
if len(self.mapList) > 0:
self.mapList[x].write_to_MRC_file(templateName+str(x)+'.mrc')
# Methods not used
#=========================================================================================================
# def make_sub_VQ_points(self, i, threshold, noOfPoints, lap_fil, epochs=300):
# vq = []
# if len(self.mapList) > 0:
# vq.append(get_VQ_points(self.mapList[i], threshold, noOfPoints, epochs, None, lap_fil))
# return vq
# def make_subVQ_points(self, threshold, noOfPoints, lap_fil, epochs=300):
# vq = []
# vq.append(get_VQ_points(self.mapList[0], threshold, noOfPoints, epochs, None, lap_fil))
# return vq
#=========================================================================================================
|
OniDaito/ChimeraXTempy
|
TEMPy/Assembly.py
|
Python
|
mit
| 12,365
|
[
"Gaussian"
] |
481ef716a6d29b3939ca87007410705224067e8e1aa6b16d568d8612f695fe33
|
from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Dropout, Reshape, Permute, Activation, Input #, merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
import numpy as np
from scipy.misc import imread, imresize, imsave
from keras import backend as K
# from convnetskeras.customlayers import crosschannelnormalization #, convolution2Dgroup, splittensor, Softmax4D
# from convnetskeras.imagenet_tool import synset_to_id, id_to_synset,synset_to_dfs_ids
"""
Returns a Keras model for a CNN.
Input data are of the shape (210, 280) with 3 RGB colour channels (channel ordering depends on the backend).
model: the Keras model for this convnet.
"""
def AlexNet(weights_path=None):
if K.image_dim_ordering() == 'tf':
inputs = Input(shape=(210, 280, 3))
else:
inputs = Input(shape=(3, 210, 280))
conv_1 = Convolution2D(96, 11, 11, subsample=(4,4), activation='relu',
name='conv_1')(inputs)
# initial weights filler? gaussian, std 0.01
conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
#conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
# in caffe: Local Response Normalization (LRN)
# alpha = 1e-4, k=2, beta=0.75, n=5,
conv_2 = ZeroPadding2D((2,2))(conv_2)
# split unnecessary on modern GPUs, no stride
conv_2 = Convolution2D(256, 5, 5, activation="relu", name='conv_2')(conv_2)
conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
#conv_3 = crosschannelnormalization()(conv_3)
conv_3 = ZeroPadding2D((1, 1))(conv_3)
conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)
conv_4 = ZeroPadding2D((1,1))(conv_3)
# split unnecessary on modern GPUs, no stride
conv_4 = Convolution2D(384, 3, 3, activation="relu", name='conv_4')(conv_4)
conv_5 = ZeroPadding2D((1,1))(conv_4)
# split unnecessary on modern GPUs, no stride
conv_5 = Convolution2D(256, 3, 3, activation="relu", name='conv_5')(conv_5)
dense_1 = MaxPooling2D((3, 3), strides=(2,2), name="convpool_5")(conv_5)
dense_1 = Flatten(name="flatten")(dense_1)
# initial weights filler? gaussian, std 0.005
dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
dense_2 = Dropout(0.5)(dense_1)
dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
dense_3 = Dropout(0.5)(dense_2)
# initial weights filler? gaussian, std 0.01
dense_3 = Dense(256, activation='relu', name='dense_3')(dense_3)
dense_4 = Dropout(0.5)(dense_3)
# output: 14 affordances, gaussian std 0.01
dense_4 = Dense(13, activation='sigmoid', name='dense_4')(dense_4)
# dense_4 = Dense(14, activation='linear', name='dense_4')(dense_4)
model = Model(input=inputs, output=dense_4)
model.summary()
if weights_path:
model.load_weights(weights_path)
sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9) # nesterov=True)
adam = Adam()
# caffe: euclidean loss
model.compile(optimizer=adam, loss='mse')
return model
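# Usage sketch (added illustration, Keras 1.x style API matching the calls
# above; the weights path and input array are placeholders):
#
#   # model = AlexNet('alexnet_weights.h5')
#   # img = np.zeros((1, 210, 280, 3))  # tf dim ordering
#   # affordances = model.predict(img)  # -> shape (1, 13)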
|
babraham123/deepdriving
|
alexnet_13.py
|
Python
|
mit
| 3,156
|
[
"Gaussian"
] |
cd7b1c2f394ed5eedb3456ca4cf6373ef69dfcb3d6128872d3948e67c9c54ff8
|
import cPickle as pickle
import datetime
import signal
import numpy as np
import theano, theano.tensor as T
from DataHandler import MidiDataHandler as MDH
class connectionTransformation(theano.Op):
"""
Theano op class implementing the connection of nodes: the neuron connections of the RNN
and the neural net itself.
"""
__props__ = ()
def make_node(self, state, time):
"""
Wrap the inputs as Theano tensor variables and build the Apply node.
:param state:
:param time:
:return:
"""
state = T.as_tensor_variable(state)
time = T.as_tensor_variable(time)
return theano.Apply(self, [state, time], [T.bmatrix()])
def perform(self, node, inputs_storage, output_storage,params = None):
"""
Required: Calculate the function on the inputs and put the variables in
the output storage. Return None.
"""
state, time = inputs_storage
beat = MDH.buildBeat(time)
context = MDH.buildContext(state)
notes = []
for note in range(len(state)):
notes.append(MDH.noteInputForm(note, state, context, beat))
output_storage[0][0] = np.array(notes, dtype='int8')
def getTrainedPhaseData(modelTemp, dataPieces, batches, name="trainedData"):
"""
Collect and save the temporarily trained data for further use; also useful if the machine crashes, so you can continue from the
last checkpoint.
:param modelTemp: the model learned so far
:param dataPieces: the data set to learn upon
:param batches: number of batches to run with
:param name: name of the output p file ... p for properties
:return: none ... it pickles (serializes) the result
"""
learnedData = []
for i in range(batches):
ipt, opt = MDH.getPieceBatch(dataPieces)
rnnFlowConnections = modelTemp.updateDatafun(ipt, opt)
learnedData.append((ipt, opt, rnnFlowConnections))
pickle.dump(learnedData, open('output/' + name + '.p', 'wb'))
def generatMusicFunction(modelTemp, pcs, times, keepDataTempLearning=False, name="final",destName = 'output\\'):
"""
As the name suggests, this function is used to compose music from the trained model.
:param modelTemp: the trained model to generate from
:param pcs: the data pieces to generate music with
:param times: the number of batch lengths of music to generate
:param keepDataTempLearning: whether to keep intermediate data through the generation (makes it heavier to run)
:param name: the name of the file to generate
:return:
"""
md = MDH.MidiDataHandler()
xIpt, xOpt = map(lambda x: np.array(x, dtype='int8'), MDH.splitToSegments(pcs))
all_outputs = [xOpt[0]]
if keepDataTempLearning:
allDataUpdate = []
modelTemp.initSlowLearning(xIpt[0])
cons = 1
for time in range(MDH.batchLength * times):
resdata = modelTemp.slowFunction(cons)
nnotes = np.sum(resdata[-1][:, 0])
if nnotes < 2:
if cons > 1:
cons = 1
cons -= 0.02
else:
cons += (1 - cons) * 0.3
all_outputs.append(resdata[-1])
if keepDataTempLearning:
allDataUpdate.append(resdata)
md.DataMatrixToMidiFile(np.array(all_outputs), destName + name)
if keepDataTempLearning:
pickle.dump(allDataUpdate, open(destName + name + '.p', 'wb'))
def trainDataPart(modelTemp,pieces,epochs,start=0,destLoctation = 'output\\' ):
"""
Initiate the training sequence, a pseudo epoch-like learning process for the deep net.
This iterative process runs the RNN learning procedure of forward propagation and back propagation, in which the
weights between the layers of the network are updated.
:param modelTemp: the model to train; passing an existing model allows us to continue training it
so we can create an even smarter net in the future.
:param pieces: the data to run through the net
:param epochs: the number of epochs / iterations to train for
:param start: epoch index to continue training from (mainly a logical continuation of the numbering)
:return:
"""
mh = MDH.MidiDataHandler()
stopflag = [False]
def signalWorker(signame, sf):
stopflag[0] = True
old_handler = signal.signal(signal.SIGINT, signalWorker)
prevTime = datetime.datetime.now()
with open(destLoctation+"logFile.txt", "w") as text_file:
for i in range(start,start+epochs):
currTime = datetime.datetime.now()
total_time=(currTime -prevTime)
prevTime = currTime
if stopflag[0]:
break
error = modelTemp.updateFunction(*MDH.getPieceBatch(pieces))
# if i % 10 == 0:
print "epoch {}, time {}, error {}".format(i,total_time.total_seconds(),error)
text_file.write("epoch {}, time {}, error {}".format(i,total_time.total_seconds(),error))
if i % 50 == 0 or (i % 20 == 0 and i < 100):
xIpt, xOpt = map(np.array, MDH.splitToSegments(pieces))
mh.DataMatrixToMidiFile(np.concatenate((np.expand_dims(xOpt[0], 0), modelTemp.predict_fun(MDH.batchLength, 1, xIpt[0])), axis=0),'output/sample{}'.format(i))
pickle.dump(modelTemp.learned_config,open(destLoctation+'params{}.p'.format(i), 'wb'))
signal.signal(signal.SIGINT, old_handler)
|
Ilya-Simkin/MusicGuru-RNN-Composer
|
DeepLearning/DeepLearningHandler.py
|
Python
|
lgpl-3.0
| 5,523
|
[
"NEURON"
] |
3c873f2538ef5ba62235a3c44e823244d6ee6267a79e83091ee1169386680b15
|
# Copyright: 2005 Gentoo Foundation
# Author(s): Brian Harring ([email protected])
# License: GPL2
import sys
from portage.cache import template, cache_errors
from portage.cache.template import reconstruct_eclasses
class SQLDatabase(template.database):
"""template class for RDBM based caches
This class is designed such that derivatives don't have to change much code, mostly constant strings.
_BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
from.
SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependant on the RDBMS, as should SCHEMA_PACKAGE_CREATE-
basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of
recovering that id, then modify _insert_cpv to remove the extra select.
Creation of a derived class involves supplying _initdb_con, and table_exists.
Additionally, the default schemas may have to be modified.
"""
SCHEMA_PACKAGE_NAME = "package_cache"
SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
SCHEMA_VALUES_NAME = "values_cache"
SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
_BaseError = ()
_dbClass = None
autocommits = False
# cleanse_keys = True
# boolean indicating if the derived RDBMS class supports replace syntax
_supports_replace = False
def __init__(self, location, label, auxdbkeys, *args, **config):
"""initialize the instance.
derived classes shouldn't need to override this"""
super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
config.setdefault("host","127.0.0.1")
config.setdefault("autocommit", self.autocommits)
self._initdb_con(config)
self.label = self._sfilter(self.label)
def _dbconnect(self, config):
"""should be overridden if the derived class needs special parameters for initializing
the db connection, or cursor"""
self.db = self._dbClass(**config)
self.con = self.db.cursor()
def _initdb_con(self,config):
"""ensure needed tables are in place.
If the derived class needs a different set of table creation commands, overload the appropriate
SCHEMA_ attributes. If it needs additional execution beyond that, override this method"""
self._dbconnect(config)
if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
if self.readonly:
raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
self.SCHEMA_PACKAGE_NAME)
try:
self.con.execute(self.SCHEMA_PACKAGE_CREATE)
except self._BaseError as e:
raise cache_errors.InitializationError(self.__class__, e)
if not self._table_exists(self.SCHEMA_VALUES_NAME):
if self.readonly:
raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
self.SCHEMA_VALUES_NAME)
try:
self.con.execute(self.SCHEMA_VALUES_CREATE)
except self._BaseError as e:
raise cache_errors.InitializationError(self.__class__, e)
def _table_exists(self, tbl):
"""return true if a table exists
derived classes must override this"""
raise NotImplementedError
def _sfilter(self, s):
"""meta escaping, returns quoted string for use in sql statements"""
return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
def _getitem(self, cpv):
try:
self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
"WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
self.label, self._sfilter(cpv)))
except self._BaseError as e:
raise cache_errors.CacheCorruption(self, cpv, e)
rows = self.con.fetchall()
if len(rows) == 0:
raise KeyError(cpv)
vals = dict([(k,"") for k in self._known_keys])
vals.update(dict(rows))
return vals
def _delitem(self, cpv):
"""delete a cpv cache entry
derived RDBM classes for this *must* either support cascaded deletes, or
override this method"""
try:
try:
self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
(self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
if self.autocommits:
self.commit()
except self._BaseError as e:
raise cache_errors.CacheCorruption(self, cpv, e)
if self.con.rowcount <= 0:
raise KeyError(cpv)
except SystemExit:
raise
except Exception:
if not self.autocommits:
self.db.rollback()
# yes, this can roll back a lot more than just the delete. deal.
raise
def __del__(self):
# just to be safe.
if "db" in self.__dict__ and self.db != None:
self.commit()
self.db.close()
def _setitem(self, cpv, values):
try:
# insert.
try:
pkgid = self._insert_cpv(cpv)
except self._BaseError as e:
raise cache_errors.CacheCorruption(cpv, e)
# __getitem__ fills out missing values,
# so we store only what's handed to us and is a known key
db_values = []
for key in self._known_keys:
if key in values and values[key]:
db_values.append({"key":key, "value":values[key]})
if len(db_values) > 0:
try:
self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
(self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
except self._BaseError as e:
raise cache_errors.CacheCorruption(cpv, e)
if self.autocommits:
self.commit()
except SystemExit:
raise
except Exception:
if not self.autocommits:
try:
self.db.rollback()
except self._BaseError:
pass
raise
def _insert_cpv(self, cpv):
"""uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
doesn't support auto-increment columns for pkgid.
returns the cpv's new pkgid.
Note this doesn't commit the transaction; the caller is expected to."""
cpv = self._sfilter(cpv)
if self._supports_replace:
query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
else:
# just delete it.
try:
del self[cpv]
except (cache_errors.CacheCorruption, KeyError):
pass
query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
try:
self.con.execute(query_str % (self.label, cpv))
except self._BaseError:
self.db.rollback()
raise
self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
(self.SCHEMA_PACKAGE_NAME, self.label, cpv))
if self.con.rowcount != 1:
raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
" %i matches upon the following select!" % self.con.rowcount)
return self.con.fetchone()[0]
def __contains__(self, cpv):
if not self.autocommits:
try:
self.commit()
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
try:
self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
(self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
return self.con.rowcount > 0
def __iter__(self):
if not self.autocommits:
try:
self.commit()
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
try:
self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
(self.SCHEMA_PACKAGE_NAME, self.label))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
# return [ row[0] for row in self.con.fetchall() ]
for x in self.con.fetchall():
yield x[0]
def iteritems(self):
try:
self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
"WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
self.label))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
oldcpv = None
l = []
for x, y, v in self.con.fetchall():
if oldcpv != x:
if oldcpv != None:
d = dict(l)
if "_eclasses_" in d:
d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
else:
d["_eclasses_"] = {}
yield cpv, d
del l[:]
oldcpv = x
l.append((y,v))
if oldcpv != None:
d = dict(l)
if "_eclasses_" in d:
d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
else:
d["_eclasses_"] = {}
yield cpv, d
def commit(self):
self.db.commit()
def get_matches(self,match_dict):
query_list = []
for k,v in match_dict.items():
if k not in self._known_keys:
raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
v = v.replace("%","\\%")
v = v.replace(".*","%")
query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
if len(query_list):
query = " AND "+" AND ".join(query_list)
else:
query = ''
print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
try:
self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
(self.label, query))
except self._BaseError as e:
raise cache_errors.GeneralCacheCorruption(e)
return [ row[0] for row in self.con.fetchall() ]
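# Example of the pattern translation performed above (added illustration):
# match_dict = {"DEPEND": ".*gtk.*"} becomes the fragment
# (key="DEPEND" AND value LIKE "%gtk%"), ANDed onto the label filter.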
if sys.hexversion >= 0x3000000:
items = iteritems
keys = __iter__
|
funtoo/portage-funtoo
|
pym/portage/cache/sql_template.py
|
Python
|
gpl-2.0
| 9,396
|
[
"Brian"
] |
907f6fc66d6d12aa8d7bd92adb35772bb528e75c0b6ebb709ca45791e09e69c7
|
from math import sqrt
import numpy as np
from ase.data import covalent_radii
from ase.atoms import Atoms
from ase.calculators.singlepoint import SinglePointCalculator
from ase.io import read, write, string2index
from ase.constraints import FixAtoms
from ase.gui.defaults import read_defaults
from ase.quaternions import Quaternion
class Images:
def __init__(self, images=None):
if images is not None:
self.initialize(images)
def initialize(self, images, filenames=None, init_magmom=False):
self.natoms = len(images[0])
self.nimages = len(images)
if filenames is None:
filenames = [None] * self.nimages
self.filenames = filenames
if hasattr(images[0], 'get_shapes'):
self.Q = np.empty((self.nimages, self.natoms, 4))
self.shapes = images[0].get_shapes()
import os as os
if os.path.exists('shapes'):
shapesfile = open('shapes')
lines = shapesfile.readlines()
shapesfile.close()
if '#{type:(shape_x,shape_y,shape_z), .....,}' in lines[0]:
shape = eval(lines[1])
shapes=[]
for an in images[0].get_atomic_numbers():
shapes.append(shape[an])
self.shapes = np.array(shapes)
else:
print 'shape file has wrong format'
else:
print 'no shapesfile found: default shapes were used!'
else:
self.shapes = None
self.P = np.empty((self.nimages, self.natoms, 3))
self.V = np.empty((self.nimages, self.natoms, 3))
self.E = np.empty(self.nimages)
self.K = np.empty(self.nimages)
self.F = np.empty((self.nimages, self.natoms, 3))
self.M = np.empty((self.nimages, self.natoms))
self.T = np.empty((self.nimages, self.natoms), int)
self.A = np.empty((self.nimages, 3, 3))
self.D = np.empty((self.nimages, 3))
self.Z = images[0].get_atomic_numbers()
self.q = np.empty((self.nimages, self.natoms))
self.pbc = images[0].get_pbc()
self.covalent_radii = covalent_radii
config = read_defaults()
if config['covalent_radii'] is not None:
for data in config['covalent_radii']:
self.covalent_radii[data[0]] = data[1]
warning = False
for i, atoms in enumerate(images):
natomsi = len(atoms)
if (natomsi != self.natoms or
(atoms.get_atomic_numbers() != self.Z).any()):
raise RuntimeError('Can not handle different images with ' +
'different numbers of atoms or different ' +
'kinds of atoms!')
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
if hasattr(self, 'Q'):
self.Q[i] = atoms.get_quaternions()
self.A[i] = atoms.get_cell()
self.D[i] = atoms.get_celldisp().reshape((3,))
if (atoms.get_pbc() != self.pbc).any():
warning = True
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
if init_magmom:
self.M[i] = atoms.get_initial_magnetic_moments()
else:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = atoms.get_initial_magnetic_moments()
self.q[i] = atoms.get_initial_charges()
# added support for tags
try:
self.T[i] = atoms.get_tags()
except RuntimeError:
self.T[i] = 0
if warning:
print('WARNING: Not all images have the same boundary conditions!')
self.selected = np.zeros(self.natoms, bool)
self.selected_ordered = []
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(self.natoms, bool)
self.nselected = 0
self.set_dynamic(constraints = images[0].constraints)
self.repeat = np.ones(3, int)
self.set_radii(config['radii_scale'])
def prepare_new_atoms(self):
"Marks that the next call to append_atoms should clear the images."
self.next_append_clears = True
def append_atoms(self, atoms, filename=None):
"Append an atoms object to the images already stored."
assert len(atoms) == self.natoms
if self.next_append_clears:
i = 0
else:
i = self.nimages
for name in ('P', 'V', 'E', 'K', 'F', 'M', 'A', 'T', 'D', 'q'):
a = getattr(self, name)
newa = np.empty( (i+1,) + a.shape[1:], a.dtype )
if not self.next_append_clears:
newa[:-1] = a
setattr(self, name, newa)
self.next_append_clears = False
self.P[i] = atoms.get_positions()
self.V[i] = atoms.get_velocities()
self.A[i] = atoms.get_cell()
self.D[i] = atoms.get_celldisp().reshape((3,))
self.q[i] = atoms.get_initial_charges()
try:
self.E[i] = atoms.get_potential_energy()
except RuntimeError:
self.E[i] = np.nan
self.K[i] = atoms.get_kinetic_energy()
try:
self.F[i] = atoms.get_forces(apply_constraint=False)
except RuntimeError:
self.F[i] = np.nan
try:
self.M[i] = atoms.get_magnetic_moments()
except (RuntimeError, AttributeError):
self.M[i] = np.nan
try:
self.T[i] = atoms.get_tags()
except AttributeError:
if i == 0:
self.T[i] = 0
else:
self.T[i] = self.T[i-1]
self.nimages = i + 1
self.filenames.append(filename)
self.set_dynamic()
return self.nimages
def set_radii(self, scale):
if self.shapes == None:
self.r = self.covalent_radii[self.Z] * scale
else:
self.r = np.sqrt(np.sum(self.shapes**2, axis=1)) * scale
def read(self, filenames, index=-1, filetype=None):
images = []
names = []
for filename in filenames:
i = read(filename, index,filetype)
if not isinstance(i, list):
i = [i]
images.extend(i)
names.extend([filename] * len(i))
self.initialize(images, names)
def import_atoms(self, filename, cur_frame):
if filename:
filename = filename[0]
old_a = self.get_atoms(cur_frame)
imp_a = read(filename, -1)
new_a = old_a + imp_a
self.initialize([new_a], [filename])
def repeat_images(self, repeat):
n = self.repeat.prod()
repeat = np.array(repeat)
self.repeat = repeat
N = repeat.prod()
natoms = self.natoms // n
P = np.empty((self.nimages, natoms * N, 3))
V = np.empty((self.nimages, natoms * N, 3))
M = np.empty((self.nimages, natoms * N))
T = np.empty((self.nimages, natoms * N), int)
F = np.empty((self.nimages, natoms * N, 3))
Z = np.empty(natoms * N, int)
r = np.empty(natoms * N)
dynamic = np.empty(natoms * N, bool)
a0 = 0
for i0 in range(repeat[0]):
for i1 in range(repeat[1]):
for i2 in range(repeat[2]):
a1 = a0 + natoms
for i in range(self.nimages):
P[i, a0:a1] = (self.P[i, :natoms] +
np.dot((i0, i1, i2), self.A[i]))
V[:, a0:a1] = self.V[:, :natoms]
F[:, a0:a1] = self.F[:, :natoms]
M[:, a0:a1] = self.M[:, :natoms]
T[:, a0:a1] = self.T[:, :natoms]
Z[a0:a1] = self.Z[:natoms]
r[a0:a1] = self.r[:natoms]
dynamic[a0:a1] = self.dynamic[:natoms]
a0 = a1
self.P = P
self.V = V
self.F = F
self.Z = Z
self.T = T
self.M = M
self.r = r
self.dynamic = dynamic
self.natoms = natoms * N
self.selected = np.zeros(natoms * N, bool)
self.atoms_to_rotate_0 = np.zeros(self.natoms, bool)
self.visible = np.ones(natoms * N, bool)
self.nselected = 0
def center(self):
""" center each image in the existing unit cell, keeping the cell constant. """
c = self.A.sum(axis=1) / 2.0 - self.P.mean(axis=1)
self.P += c[:, np.newaxis, :]
def graph(self, expr):
""" routine to create the data in ase-gui graphs, defined by the string expr. """
import ase.units as units
code = compile(expr + ',', 'atoms.py', 'eval')
n = self.nimages
def d(n1, n2):
return sqrt(((R[n1] - R[n2])**2).sum())
def a(n1, n2, n3):
v1 = R[n1]-R[n2]
v2 = R[n3]-R[n2]
arg = np.vdot(v1,v2)/(sqrt((v1**2).sum()*(v2**2).sum()))
if arg > 1.0: arg = 1.0
if arg < -1.0: arg = -1.0
return 180.0*np.arccos(arg)/np.pi
def dih(n1, n2, n3, n4):
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = R[n2]-R[n1]
b = R[n3]-R[n2]
c = R[n4]-R[n3]
bxa = np.cross(b,a)
bxa /= np.sqrt(np.vdot(bxa,bxa))
cxb = np.cross(c,b)
cxb /= np.sqrt(np.vdot(cxb,cxb))
angle = np.vdot(bxa,cxb)
# check for numerical trouble due to finite precision:
if angle < -1: angle = -1
if angle > 1: angle = 1
angle = np.arccos(angle)
if (np.vdot(bxa,c)) > 0: angle = 2*np.pi-angle
return angle*180.0/np.pi
# get number of mobile atoms for temperature calculation
ndynamic = 0
for dyn in self.dynamic:
if dyn: ndynamic += 1
S = self.selected
D = self.dynamic[:, np.newaxis]
E = self.E
s = 0.0
data = []
for i in range(n):
R = self.P[i]
V = self.V[i]
F = self.F[i]
A = self.A[i]
M = self.M[i]
f = ((F * D)**2).sum(1)**.5
fmax = max(f)
fave = f.mean()
epot = E[i]
ekin = self.K[i]
e = epot + ekin
T = 2.0 * ekin / (3.0 * ndynamic * units.kB)
data = eval(code)
if i == 0:
m = len(data)
xy = np.empty((m, n))
xy[:, i] = data
if i + 1 < n:
s += sqrt(((self.P[i + 1] - R)**2).sum())
return xy
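    # Added illustration (not part of the original file): the expr string is
    # compiled once and then evaluated for every frame with the helpers above
    # (d, a, dih) and the per-frame names (e, epot, ekin, fmax, fave, T, s, i)
    # in scope. For example, passing 'i, e, fmax, d(0, 1)' would fill one row
    # of xy per quantity: frame index, total energy, maximum force and the
    # distance between atoms 0 and 1 (the atom indices are chosen purely for
    # illustration).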
def set_dynamic(self, constraints = None):
self.dynamic = np.ones(self.natoms, bool)
if constraints is not None:
for con in constraints:
if isinstance(con,FixAtoms):
self.dynamic[con.index] = False
def write(self, filename, rotations='', show_unit_cell=False, bbox=None, **kwargs):
indices = range(self.nimages)
p = filename.rfind('@')
if p != -1:
try:
slice = string2index(filename[p + 1:])
except ValueError:
pass
else:
indices = indices[slice]
filename = filename[:p]
if isinstance(indices, int):
indices = [indices]
images = [self.get_atoms(i) for i in indices]
if len(filename) > 4 and filename[-4:] in ['.eps', '.png', '.pov']:
write(filename, images,
rotation=rotations, show_unit_cell=show_unit_cell,
bbox=bbox, **kwargs)
else:
write(filename, images, **kwargs)
def get_atoms(self, frame, remove_hidden=False):
atoms = Atoms(positions=self.P[frame],
numbers=self.Z,
magmoms=self.M[0],
tags=self.T[frame],
cell=self.A[frame],
pbc=self.pbc)
if not np.isnan(self.V).any():
atoms.set_velocities(self.V[frame])
# check for constrained atoms and add them accordingly:
if not self.dynamic.all():
atoms.set_constraint(FixAtoms(mask=1-self.dynamic))
# Remove hidden atoms if applicable
if remove_hidden:
atoms = atoms[self.visible]
f = self.F[frame][self.visible]
else:
f = self.F[frame]
atoms.set_calculator(SinglePointCalculator(atoms,
energy=self.E[frame],
forces=f))
return atoms
def delete(self, i):
self.nimages -= 1
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
P[:i] = self.P[:i]
P[i:] = self.P[i + 1:]
self.P = P
V[:i] = self.V[:i]
V[i:] = self.V[i + 1:]
self.V = V
F[:i] = self.F[:i]
F[i:] = self.F[i + 1:]
self.F = F
A[:i] = self.A[:i]
A[i:] = self.A[i + 1:]
self.A = A
E[:i] = self.E[:i]
E[i:] = self.E[i + 1:]
self.E = E
del self.filenames[i]
def aneb(self):
n = self.nimages
assert n % 5 == 0
levels = n // 5
n = self.nimages = 2 * levels + 3
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
E = np.empty(self.nimages)
for L in range(levels):
P[L] = self.P[L * 5]
P[n - L - 1] = self.P[L * 5 + 4]
V[L] = self.V[L * 5]
V[n - L - 1] = self.V[L * 5 + 4]
F[L] = self.F[L * 5]
F[n - L - 1] = self.F[L * 5 + 4]
E[L] = self.E[L * 5]
E[n - L - 1] = self.E[L * 5 + 4]
for i in range(3):
P[levels + i] = self.P[levels * 5 - 4 + i]
V[levels + i] = self.V[levels * 5 - 4 + i]
F[levels + i] = self.F[levels * 5 - 4 + i]
E[levels + i] = self.E[levels * 5 - 4 + i]
self.P = P
self.V = V
self.F = F
self.E = E
def interpolate(self, m):
assert self.nimages == 2
self.nimages = 2 + m
P = np.empty((self.nimages, self.natoms, 3))
V = np.empty((self.nimages, self.natoms, 3))
F = np.empty((self.nimages, self.natoms, 3))
A = np.empty((self.nimages, 3, 3))
E = np.empty(self.nimages)
T = np.empty((self.nimages, self.natoms), int)
D = np.empty((self.nimages, 3))
P[0] = self.P[0]
V[0] = self.V[0]
F[0] = self.F[0]
A[0] = self.A[0]
E[0] = self.E[0]
T[:] = self.T[0]
for i in range(1, m + 1):
x = i / (m + 1.0)
y = 1 - x
P[i] = y * self.P[0] + x * self.P[1]
V[i] = y * self.V[0] + x * self.V[1]
F[i] = y * self.F[0] + x * self.F[1]
A[i] = y * self.A[0] + x * self.A[1]
E[i] = y * self.E[0] + x * self.E[1]
D[i] = y * self.D[0] + x * self.D[1]
P[-1] = self.P[1]
V[-1] = self.V[1]
F[-1] = self.F[1]
A[-1] = self.A[1]
E[-1] = self.E[1]
D[-1] = self.D[1]
self.P = P
self.V = V
self.F = F
self.A = A
self.E = E
self.T = T
self.D = D
self.filenames[1:1] = [None] * m
if __name__ == '__main__':
import os
os.system('python gui.py')
|
PHOTOX/fuase
|
ase/ase/gui/images.py
|
Python
|
gpl-2.0
| 16,499
|
[
"ASE"
] |
7b6de0a17fbd36620e38be1b01b00b421362d10f6d2de74a947c4c8db57a8e5b
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from nematic_analysis import *
from glob import glob
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder='/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#basefolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
#outfolder= '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/J_1_0_v0_1_0/'
outfolder = '/home/silke/Documents/CurrentProjects/Rastko/nematic/data/'
#v0val=['0.3','0.5','0.7','1.5','2.0','3.0','7.0','10.0']
v0val=['3.0','5.0','7.0','10.0']
sigma=1
rval=['30.0']
nstep=10100000
nsave=5000
nsnap=int(nstep/nsave)
#skip=835
skip=0
startvtk=1500
for r in rval:
for v0 in v0val:
#param = Param(basefolder)
files = sorted(glob(basefolder+'R_'+ r+ '/v0_' + v0 + '/sphere_*.dat'))[skip:]
defects=np.zeros((len(files),12))
ndefect=np.zeros((len(files),1))
u=0
for f in files:
print f
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_data' + str(u-startvtk)+'.vtk'
if u<startvtk:
defects0,ndefect0=getDefects(f,float(r),sigma,outname,False,False)
else:
defects0,ndefect0=getDefects(f,float(r),sigma,outname,False,True)
outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
outname =outfolder +'R_'+ r+ '/v0_' + v0 + '/frame_defects' + str(u-startvtk)+'.vtk'
print outname
writeDefects(defects0,ndefect0,outname)
defects[u,0:3]=defects0[0,:]
defects[u,3:6]=defects0[1,:]
defects[u,6:9]=defects0[2,:]
defects[u,9:12]=defects0[3,:]
ndefect[u]=ndefect0
u+=1
outfile2=outfolder + 'defects_v0_' + v0 + '_R_'+ r+ '.dat'
np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect defects')
|
sknepneklab/SAMoS
|
analysis/batch_nematic/batch_analyze_nematic_R30c.py
|
Python
|
gpl-3.0
| 2,511
|
[
"VTK"
] |
ea830e03ac8ad689bfe53a06b3dfd8d7faf7bff53ee60b19441d6fa5d75fa74b
|
"""
Instructor Dashboard Views
"""
import datetime
import logging
import uuid
from functools import reduce
from unittest.mock import patch
import pytz
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseServerError
from django.urls import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from edx_proctoring.api import does_backend_support_onboarding
from edx_when.api import is_enabled_for_course
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from common.djangoapps.course_modes.models import CourseMode, CourseModesArchive
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import (
CourseFinanceAdminRole,
CourseInstructorRole,
CourseSalesAdminRole,
CourseStaffRole
)
from common.djangoapps.util.json_request import JsonResponse
from lms.djangoapps.bulk_email.api import is_bulk_email_feature_enabled
from lms.djangoapps.certificates import api as certs_api
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationHistory,
CertificateInvalidation,
CertificateStatuses,
CertificateWhitelist,
GeneratedCertificate
)
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.courses import get_course_by_id, get_studio_url
from lms.djangoapps.courseware.module_render import get_module_by_usage_id
from lms.djangoapps.discussion.django_comment_client.utils import available_division_schemes, has_forum_access
from lms.djangoapps.grades.api import is_writable_gradebook_enabled
from openedx.core.djangoapps.course_groups.cohorts import DEFAULT_COHORT_NAME, get_course_cohorts, is_course_cohorted
from openedx.core.djangoapps.django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, CourseDiscussionSettings
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.verified_track_content.models import VerifiedTrackCohortedCourse
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.url_utils import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlBlock
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from .. import permissions
from ..toggles import data_download_v2_is_enabled
from .tools import get_units_with_due_date, title_or_url
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and user.is_authenticated and user.has_perm(permissions.VIEW_DASHBOARD, course.id))
def show_analytics_dashboard_message(course_key):
"""
Defines whether or not the analytics dashboard URL should be displayed.
Arguments:
course_key (CourseLocator): The course locator to display the analytics dashboard message on.
"""
if hasattr(course_key, 'ccx'):
ccx_analytics_enabled = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
return settings.ANALYTICS_DASHBOARD_URL and ccx_analytics_enabled
return settings.ANALYTICS_DASHBOARD_URL
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id): # lint-amnesty, pylint: disable=too-many-statements
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error("Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=None)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
'data_researcher': request.user.has_perm(permissions.CAN_RESEARCH, course_key),
}
if not request.user.has_perm(permissions.VIEW_DASHBOARD, course_key):
raise Http404()
is_white_label = CourseMode.is_white_label(course_key) # lint-amnesty, pylint: disable=unused-variable
reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False) # lint-amnesty, pylint: disable=unused-variable
sections = []
if access['staff']:
sections.extend([
_section_course_info(course, access),
_section_membership(course, access),
_section_cohort_management(course, access),
_section_discussions_management(course, access),
_section_student_admin(course, access),
])
if access['data_researcher']:
sections.append(_section_data_download(course, access))
analytics_dashboard_message = None
if show_analytics_dashboard_message(course_key) and (access['staff'] or access['instructor']):
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{}/courses/{}'.format(settings.ANALYTICS_DASHBOARD_URL, str(course_key))
link_start = HTML("<a href=\"{}\" rel=\"noopener\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False # lint-amnesty, pylint: disable=unused-variable
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
"Course %s has %s course modes with payment options. Course must only have "
"one paid course mode to enable eCommerce options.",
str(course_key), len(paid_modes)
)
if access['instructor'] and is_enabled_for_course(course_key):
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if is_bulk_email_feature_enabled(course_key) and (access['staff'] or access['instructor']):
sections.append(_section_send_email(course, access))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
user_has_access = any([
request.user.is_staff,
CourseStaffRole(course_key).has_user(request.user),
CourseInstructorRole(course_key).has_user(request.user)
])
course_has_special_exams = course.enable_proctored_exams or course.enable_timed_exams
can_see_special_exams = course_has_special_exams and user_has_access and settings.FEATURES.get(
'ENABLE_SPECIAL_EXAMS', False)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
# Note: This is hidden for all CCXs
certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
if certs_enabled and access['instructor']:
sections.append(_section_certificates(course))
openassessment_blocks = modulestore().get_items(
course_key, qualifiers={'category': 'openassessment'}
)
# filter out orphaned openassessment blocks
openassessment_blocks = [
block for block in openassessment_blocks if block.parent is not None
]
if len(openassessment_blocks) > 0 and access['staff']:
sections.append(_section_open_response_assessment(request, course, openassessment_blocks, access))
disable_buttons = not CourseEnrollment.objects.is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse(
'generate_certificate_exceptions',
kwargs={'course_id': str(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse(
'generate_bulk_certificate_exceptions',
kwargs={'course_id': str(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': str(course_key)}
)
certificate_invalidation_view_url = reverse(
'certificate_invalidation_view',
kwargs={'course_id': str(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
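# The following is a minimal, hypothetical sketch (added for illustration, not
# part of the original module) of the contract described above; the function
# name and section key are invented and it is not wired into any dashboard.
def _section_example(course, access):
    """ Provide data for a hypothetical dashboard section """
    return {
        'section_key': 'circus_expo',
        'section_display_name': _('Circus Expo'),
        'access': access,
        'course_id': str(course.id),
    }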
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = str(course.id)
proctoring_provider = course.proctoring_provider
escalation_email = None
if proctoring_provider == 'proctortrack':
escalation_email = course.proctoring_escalation_email
from edx_proctoring.api import is_backend_dashboard_available
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': course_key,
'escalation_email': escalation_email,
'show_dashboard': is_backend_dashboard_available(course_key),
'show_onboarding': does_backend_support_onboarding(course.proctoring_provider),
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = CourseKey.from_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name_with_default,
'course_org': course.display_org_with_default,
'course_number': course.display_number_with_default,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': course.start,
'end_date': course.end,
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if show_analytics_dashboard_message(course_key):
# dashboard_link is already made safe in _get_dashboard_link
dashboard_link = _get_dashboard_link(course_key)
# so we can use Text() here so it's not double-escaped and rendering HTML on the front-end
message = Text(
_("Enrollment data is now available in {dashboard_link}.")
).format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
try:
sorted_cutoffs = sorted(list(course.grade_cutoffs.items()), key=lambda i: i[1], reverse=True)
advance = lambda memo, letter_score_tuple: "{}: {}, ".format(letter_score_tuple[0], letter_score_tuple[1]) \
+ memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': str(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': str(course_key)}),
'upload_student_csv_button_url': reverse(
'register_and_enroll_students',
kwargs={'course_id': str(course_key)}
),
'modify_beta_testers_button_url': reverse(
'bulk_beta_modify_access',
kwargs={'course_id': str(course_key)}
),
'list_course_role_members_url': reverse(
'list_course_role_members',
kwargs={'course_id': str(course_key)}
),
'modify_access_url': reverse('modify_access', kwargs={'course_id': str(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': str(course_key)}),
'update_forum_role_membership_url': reverse(
'update_forum_role_membership',
kwargs={'course_id': str(course_key)}
),
'is_reason_field_enabled': configuration_helpers.get_value('ENABLE_MANUAL_ENROLLMENT_REASON_FIELD', False)
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
ccx_enabled = hasattr(course_key, 'ccx')
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': str(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': str(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': str(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': str(course_key)}
),
}
return section_data
def _section_discussions_management(course, access): # lint-amnesty, pylint: disable=unused-argument
""" Provide data for the corresponding discussion management section """
course_key = course.id
enrollment_track_schemes = available_division_schemes(course_key)
section_data = {
'section_key': 'discussions_management',
'section_display_name': _('Discussions'),
'is_hidden': (not is_course_cohorted(course_key) and
CourseDiscussionSettings.ENROLLMENT_TRACK not in enrollment_track_schemes),
'discussion_topics_url': reverse('discussion_topics', kwargs={'course_key_string': str(course_key)}),
'course_discussion_settings': reverse(
'course_discussions_settings',
kwargs={'course_key_string': str(course_key)}
),
}
return section_data
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = CourseEnrollment.objects.is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_enrollment_status_url': reverse(
'get_student_enrollment_status',
kwargs={'course_id': str(course_key)}
),
'get_student_progress_url_url': reverse(
'get_student_progress_url',
kwargs={'course_id': str(course_key)}
),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': str(course_key)}),
'reset_student_attempts_url': reverse(
'reset_student_attempts',
kwargs={'course_id': str(course_key)}
),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': str(course_key)}),
'override_problem_score_url': reverse(
'override_problem_score',
kwargs={'course_id': str(course_key)}
),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': str(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': str(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse(
'list_entrance_exam_instructor_tasks',
kwargs={'course_id': str(course_key)}
),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': str(course_key)}),
}
if is_writable_gradebook_enabled(course_key) and settings.WRITABLE_GRADEBOOK_URL:
section_data['writable_gradebook_url'] = '{}/{}'.format(settings.WRITABLE_GRADEBOOK_URL, str(course_key))
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), str(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': str(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': str(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': str(course.id)}),
'show_student_extensions_url': reverse(
'show_student_extensions',
kwargs={'course_id': str(course.id)}
),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_key = 'data_download_2' if data_download_v2_is_enabled() else 'data_download'
section_data = {
'section_key': section_key,
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': str(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': str(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': str(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': str(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': str(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': str(course_key)}),
'list_proctored_results_url': reverse(
'get_proctored_exam_results', kwargs={'course_id': str(course_key)}
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': str(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': str(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': str(course_key)}),
'course_has_survey': True if course.course_survey_name else False, # lint-amnesty, pylint: disable=simplifiable-if-expression
'course_survey_results_url': reverse(
'get_course_survey_results', kwargs={'course_id': str(course_key)}
),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': str(course_key)}),
'export_ora2_submission_files_url': reverse(
'export_ora2_submission_files', kwargs={'course_id': str(course_key)}
),
'export_ora2_summary_url': reverse('export_ora2_summary', kwargs={'course_id': str(course_key)}),
}
if not access.get('data_researcher'):
section_data['is_hidden'] = True
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlBlock for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlBlock is only being used to generate a nice text editor.
html_module = HtmlBlock(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": str(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(str(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().hex
)
cohorts = []
if is_course_cohorted(course_key):
cohorts = get_course_cohorts(course)
course_modes = []
if not VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key):
course_modes = CourseMode.modes_for_course(course_key, include_expired=True, only_selectable=False)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': str(course_key)}),
'editor': email_editor,
'cohorts': cohorts,
'course_modes': course_modes,
'default_cohort_name': DEFAULT_COHORT_NAME,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': str(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': str(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': str(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{}/courses/{}'.format(settings.ANALYTICS_DASHBOARD_URL, str(course_key))
link = HTML("<a href=\"{0}\" rel=\"noopener\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': str(course.id),
}
return section_data
def _section_open_response_assessment(request, course, openassessment_blocks, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
ora_items = []
parents = {}
for block in openassessment_blocks:
block_parent_id = str(block.parent)
result_item_id = str(block.location)
if block_parent_id not in parents:
parents[block_parent_id] = modulestore().get_item(block.parent)
assessment_name = _("Team") + " : " + block.display_name if block.teams_enabled else block.display_name
ora_items.append({
'id': result_item_id,
'name': assessment_name,
'parent_id': block_parent_id,
'parent_name': parents[block_parent_id].display_name,
'staff_assessment': 'staff-assessment' in block.assessment_steps,
'url_base': reverse('xblock_view', args=[course.id, block.location, 'student_view']),
'url_grade_available_responses': reverse('xblock_view', args=[course.id, block.location,
'grade_available_responses_view']),
})
openassessment_block = openassessment_blocks[0]
block, __ = get_module_by_usage_id(
request, str(course_key), str(openassessment_block.location),
disable_staff_debug_info=True, course=course
)
section_data = {
'fragment': block.render('ora_blocks_listing_view', context={
'ora_items': ora_items,
'ora_item_view_enabled': settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False)
}),
'section_key': 'open_response_assessment',
'section_display_name': _('Open Responses'),
'access': access,
'course_id': str(course_key),
}
return section_data
def is_ecommerce_course(course_key):
"""
Checks if the given course is an e-commerce course or not, by checking its SKU value from
CourseMode records for the course
"""
sku_count = len([mode.sku for mode in CourseMode.modes_for_course(course_key) if mode.sku])
return sku_count > 0
|
eduNEXT/edunext-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 33,422
|
[
"VisIt"
] |
ede08f20c3270d13ba616525c16da85a6bc5b33b9fcaa2da2ea9b79c7c31c4b7
|
#Character level RNN
import numpy as np
file = open('alice.txt' , 'r').read()
chars = list(set(file))
file_size , char_size = len(file) , len(chars)
print "File has %d characters and %d unique characters" %(file_size,char_size)
#Builds the Vector model
char_ix = {ch:i for i,ch in enumerate(chars)}
ix_char = {i:ch for i,ch in enumerate(chars)}
# Example Vector for 'x'
vector_for_char = np.zeros((char_size , 1))
vector_for_char[char_ix['x']] = 1
# print the sample vector
print vector_for_char.ravel()
#param
neuron_size = 100
seq_length = 25 # number of steps to unroll RNN
learn_rate = 1e-1
#weights
Wxh = np.random.randn(neuron_size , char_size)*0.01
Whh = np.random.randn(neuron_size , neuron_size)*0.01
Why = np.random.randn(char_size , neuron_size)*0.01
bh = np.zeros((neuron_size , 1))
by = np.zeros((char_size,1))
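# Added note (not in the original): the shapes follow the recurrence
#   h_t = tanh(Wxh . x_t + Whh . h_{t-1} + bh),  y_t = Why . h_t + by
# so Wxh is (hidden x vocab), Whh is (hidden x hidden) and Why is
# (vocab x hidden); the 0.01 factor keeps the initial activations small.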
# Creating a sample text model without any optimization
def text_sample(h , start_ix , n):
'''
h -> memory state
start_ix -> start char
'''
x = np.zeros((char_size , 1))
x[start_ix] = 1
ixs = []
for t in xrange(n):
h = np.tanh(np.dot(Wxh , x) + np.dot(Whh ,h) + bh)
y = np.dot(Why , h) + by
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(char_size) , p = p.ravel())
x = np.zeros((char_size , 1))
x[ix] = 1
ixs.append(ix)
txt = ''.join(ix_char[ix] for ix in ixs)
print '------ \n %s \n ------'%(txt,)
hprev = np.zeros((neuron_size , 1)) # reset the memory state
text_sample(hprev , char_ix['a'] , 2000)
# The above model makes no sense
#Now defining the loss function
def lossFunc(inputs , targets , hprev):
'''
input , target -> list of integers
hprev -> initial memory state of hidden neurons (we start with zeros)
returns the loss , gradients on model parameters , last neuron hidden state
'''
n = len(inputs)
xs , hs , ys , ps = {} , {}, {} ,{}
hs[-1] = np.copy(hprev)
loss= 0
#Forward Pass
for t in xrange(n):
xs[t] = np.zeros((char_size,1))
xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(Wxh , xs[t]) + np.dot(Whh , hs[t-1]) + bh) #Hidden neuron state
        ys[t] = np.dot(Why , hs[t]) + by # Unnormalised scores for the next char
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # softmax probabilities
        loss += -np.log(ps[t][targets[t] , 0]) # cross-entropy loss of the softmax output
# Backward Pass
dWxh ,dWhh , dWhy = np.zeros_like(Wxh) , np.zeros_like(Whh) , np.zeros_like(Why)
dbh , dby = np.zeros_like(bh) , np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
# Going backwards so reversed( xrange() ) **
for t in reversed(xrange(n)):
        dy = np.copy(ps[t])
dy[targets[t]] -= 1 # Backprop into y
dWhy += np.dot(dy,hs[t].T)
|
PadamSethia/shakeyshakespeare
|
main.py
|
Python
|
mit
| 2,679
|
[
"NEURON"
] |
58ec70b0d955f7a7ec7f4047e205df3c80d0a205bc26a39ba97ed3944f685acb
|
# Moogul.py: MOOSE Graphics 3D.
# Copyright (C) Upinder S. Bhalla NCBS 2022
# This program is licensed under the GNU Public License version 3.
#
import moose
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import vpython as vp
import time
from packaging import version
#from mpl_toolkits.mplot3d.art3d import Line3DCollection
NUM_CMAP = 64
SCALE_SCENE = 64
bgvector = vp.vector(0.7, 0.8, 0.9) # RGB
bgDict = {'default': bgvector, 'black': vp.color.black, 'white': vp.color.white, 'cyan': vp.color.cyan, 'grey': vp.vector( 0.5, 0.5, 0.5 ) }
sleepTimes = [0.0, 0.0005, 0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]
def bgLookup( bg ):
col = bgDict.get( bg )
if not col:
return bgvector
return col
class MoogulError( Exception ):
def __init__( self, value ):
self.value = value
def __str__( self ):
return repr( self.value )
class MooView:
''' The MooView class is a window in which to display one or more
    neurons, using the MooNeuron and MooReacSystem classes.'''
viewIdx = 0
origScene = None
rgb = []
viewList = []
consolidatedTitle = ""
def __init__( self, swx = 10, swy = 10, hideAxis = True, title = "view", colormap = 'jet'
):
self.viewIdx = MooView.viewIdx
MooView.viewIdx += 1
MooView.viewList.append( self )
MooView.consolidatedTitle += title + " "
self.title = title
self.swx = swx
self.swy = swy
self.drawables_ = []
self.sensitivity = 0.05 # radians rotation, and other adjustments
self.sleep = 0.005 # Seconds to sleep per frame
self.colormap = colormap
self.colorbar = None
self.valMin = 0.0
        self.valMax = 1.0
        self.hideAxis = hideAxis  # stored so the 'g' key toggle in moveView has an initial value
self.plotFlag_ = True
@staticmethod
def replayLoop():
if len( MooView.viewList ) == 0:
return
numFrames = MooView.viewList[0].numFrames()
while MooView.viewList[0].replayButton.text == "Stop Replay":
for idx in range( numFrames ):
for view in MooView.viewList:
view.replaySnapshot( idx )
vp.sleep( MooView.viewList[0].sleep )
vp.sleep( 0.5 ) # Pause 0.5 sec between replays
def notifySimulationEnd( self ):
if self.viewIdx == 0:
self.replayButton.disabled = False
def numFrames( self ):
if len( self.drawables_ ) == 0:
return 0
return len( self.drawables_[0].snapshot )
def addDrawable( self, n ):
self.drawables_.append( n )
if len( self.drawables_ ) == 1:
self.valMin = n.valMin
self.valMax = n.valMax
# self.scene.objects also maintains list.
def toggleReplay( self ):
if self.replayButton.text == "Start Replay":
self.replayButton.text = "Stop Replay"
self.replayButton.background = vp.color.red
MooView.replayLoop()
else:
self.replayButton.text = "Start Replay"
self.replayButton.background = vp.color.white
def setSleepTime( self ):
idx = int( round( self.sleepSlider.value ) )
self.sleep = sleepTimes[idx]
self.sleepLabel.text = " Frame dt = {:1.3f} sec".format( self.sleep )
def updateAxis( self ):
if not self.colorbar:
return
forward = vp.norm( self.scene.forward )
screenUp = vp.norm( self.scene.up )
right = vp.norm( vp.cross( forward, screenUp ) )
up = vp.norm( vp.cross( right, forward ) )
dx = 0.8
x = vp.vector( dx, 0.0, 0.0 )
y = vp.vector( 0.0, dx, 0.0 )
z = vp.vector( 0.0, 0.0, dx )
self.xAx.axis = vp.vector( x.dot( right ), x.dot( up ), 0.0 )
self.yAx.axis = vp.vector( y.dot( right ), y.dot( up ), 0.0 )
self.zAx.axis = vp.vector( z.dot( right ), z.dot( up ), 0.0 )
self.axisLength.text = "{:.2f} <i>u</i>m".format( dx * 1e6*self.scene.range * self.colorbar.width / self.scene.width )
def innerColorbar( self, title, bg ):
barWidth = SCALE_SCENE * 1.5
if ( bgLookup(bg).mag < 1 ):
barTextColor = vp.color.white
else:
barTextColor = vp.color.black
self.colorbar = vp.canvas( title = title, width = barWidth, height = self.swy * SCALE_SCENE, background = bgLookup(bg), align = 'left', range = 1, autoscale = False )
#self.colorbar = vp.canvas( title = title, width = barWidth, height = self.swy * SCALE_SCENE, background = vp.color.cyan, align = 'left', range = 1, autoscale = False )
self.colorbar.userzoom = False
self.colorbar.userspin = False
self.colorbar.userpan = False
height = 0.10
width = 5
axOrigin = vp.vector( 0, -5.5, 0 )
for idx, rgb in enumerate( self.rgb ):
cbox = vp.box( canvas = self.colorbar, pos = vp.vector( 0, height * (idx - 26), 0), width = width, height = height, color = rgb )
barName = self.title.replace( ' ', '\n' )
self.barName = vp.label( canvas = self.colorbar, align = 'left', pixel_pos = True, pos = vp.vector( 2, (self.swy - 0.32) * SCALE_SCENE, 0), text = barName, height = 15, color = barTextColor, box = False, opacity = 0 )
self.barMin = vp.label( canvas = self.colorbar, align = 'center', pixel_pos = True, pos = vp.vector( barWidth/2, self.swy * SCALE_SCENE * 0.22, 0), text = "{:.3f}".format(self.valMin), height = 12, color = barTextColor, box = False, opacity = 0 )
self.barMax = vp.label( canvas = self.colorbar, align = 'center', pixel_pos = True, pos = vp.vector( barWidth/2, (self.swy - 1.2) * SCALE_SCENE, 0), text = "{:.3f}".format(self.valMax), height = 12, color = barTextColor, box = False, opacity = 0 )
self.xAx = vp.cylinder( canvas = self.colorbar, pos = axOrigin, axis = vp.vector( 0.8, 0, 0 ), radius = 0.04, color = vp.color.red )
self.yAx = vp.cylinder( canvas = self.colorbar, pos = axOrigin, axis = vp.vector( 0, 0.8, 0 ), radius = 0.04, color = vp.color.green )
self.zAx = vp.cylinder( canvas = self.colorbar, pos = axOrigin, axis = vp.vector( 0, 0, 0 ), radius = 0.04, color = vp.color.blue )
self.axisLength = vp.label( pos = axOrigin + vp.vector(0, 1, 0), text = "1.00 <i>u</i>m", color = barTextColor, box = False )
def makeColorbar( self, doOrnaments = True, colorscale = 'jet', bg = 'default' ):
title = None
if doOrnaments:
title = MooView.consolidatedTitle + "\n"
self.innerColorbar( title, bg )
if doOrnaments:
self.timeLabel = vp.wtext( text = "Time = 0.000 sec", pos = self.colorbar.title_anchor )
self.sleepLabel = vp.wtext( text = " Frame dt = 0.005 sec", pos = self.colorbar.title_anchor )
self.sleepSlider = vp.slider( pos = self.colorbar.title_anchor, length = 200, bind = self.setSleepTime, min = 0, max = len( sleepTimes ) -1, value = min( len( sleepTimes ), 2 ) )
self.replayButton = vp.button( text = "Start Replay", pos = self.colorbar.title_anchor, bind=self.toggleReplay, disabled = True )
self.colorbar.append_to_title("\n")
def pickObj( self ):
obj = self.scene.mouse.pick
if obj == None:
return
elmPath = self.innerPickObj( obj )
if elmPath:
self.handlePick( elmPath )
return
elif self.viewIdx == 0:
for view in MooView.viewList[1:]:
if view.colorbar == None:
elmPath = view.innerPickObj( obj )
if elmPath:
self.handlePick( elmPath )
return
print( "Object {} not found on view {}".format( obj, self.title ) )
def innerPickObj( self, obj ):
for dr in self.drawables_:
elmPath = dr.findDisplayObject( obj )
if elmPath:
return (elmPath[0], elmPath[1], dr)
return None
def handlePick( self, elmPath ):
path, field, drawable = elmPath
if self.plotFlag_:
drawable.plotHistory( path, field, self.graph, self.graphPlot1 )
else:
print( path, field )
def makeScene( self, mergeDisplays, bg = 'default' ):
if self.viewIdx == 0:
MooView.origScene = vp.canvas( width = self.swx * SCALE_SCENE, height = self.swy * SCALE_SCENE, background = bgLookup( bg ), align = 'left', autoscale = True )
self.scene = MooView.origScene
self.scene.bind( 'keydown', self.moveView )
self.scene.bind( 'keydown', self.updateAxis )
self.scene.bind( 'mousedown', self.pickObj )
#self.flatbox = vp.box( width = 10, height = 6 )
elif mergeDisplays:
self.scene = MooView.origScene
else:
self.scene = vp.canvas( width = self.swx * SCALE_SCENE, height = self.swy * SCALE_SCENE, background = bgvector, align = 'left', autoscale = True )
self.scene.bind( 'keydown', self.moveView )
self.scene.bind( 'keydown', self.updateAxis )
self.scene.bind( 'mousedown', self.pickObj )
'''
self.xAx2 = vp.cylinder( canvas = self.scene, pos = vp.vector( 0, 0, 0), axis = vp.vector( 1e-5, 0, 0 ), radius = 0.2e-6, color = vp.color.red )
self.yAx2 = vp.cylinder( canvas = self.scene, pos = vp.vector( 0, 0, 0), axis = vp.vector( 0, 1e-5, 0 ), radius = 0.2e-6, color = vp.color.green )
self.zAx2 = vp.cylinder( canvas = self.scene, pos = vp.vector( 0, 0, 0), axis = vp.vector( 0, 0, 1e-5 ), radius = 0.2e-6, color = vp.color.blue )
'''
self.scene.bind( 'mousedown mousemove mouseup', self.updateAxis )
def firstDraw( self, mergeDisplays, rotation=0.0, elev=0.0, azim=0.0, center = [0.0, 0,0, 0.0], colormap = 'jet', bg = 'default' ):
self.colormap = colormap
cmap = plt.get_cmap( self.colormap, lut = NUM_CMAP )
self.rgb = [ list2vec(cmap(i)[0:3]) for i in range( NUM_CMAP ) ]
doOrnaments = (self.viewIdx == 0)
if doOrnaments or not mergeDisplays:
self.makeColorbar( doOrnaments = doOrnaments, bg = bg )
self.makeScene( mergeDisplays, bg = bg )
if rotation == 0.0:
self.doRotation = False
self.rotation = 0.1 # default rotation per frame, in radians.
else:
self.doRotation = True
self.rotation = rotation # arg units: radians/frame
for i in self.drawables_:
i.rgb = self.rgb
i.drawForTheFirstTime( self.scene )
if doOrnaments or not mergeDisplays:
if len( center ) == 3:
self.scene.center = list2vec( center )
else:
self.doAutoscale()
self.updateAxis()
if self.viewIdx == (MooView.viewIdx-1):
self.graph = vp.graph( title = "Graph", xtitle = "Time (s)", ytitle = " Units here", width = 700, fast=False, align = "left" )
self.graphPlot1 = vp.gcurve( color = vp.color.blue, interval=-1)
#self.graphPlot1.data = [[0,0], [1,1],[2,0],[3,4],[4,0], [5,1]]
#self.graphPlot1.plot( [[0,0], [1,1],[2,0],[3,4],[4,0]] )
def updateValues( self, simTime ):
for i in self.drawables_:
i.updateValues( simTime )
if self.doRotation and abs( self.rotation ) < 2.0 * 3.14 / 3.0:
self.scene.forward = vp.rotate( self.scene.forward, angle = self.rotation, axis = self.scene.up )
self.updateAxis()
if self.viewIdx == 0:
self.timeLabel.text = "Time = {:7.3f} sec".format( simTime )
vp.sleep( self.sleep )
def replaySnapshot( self, idx ):
for i in self.drawables_:
simTime = i.replaySnapshot( idx )
if self.viewIdx == 0:
self.timeLabel.text = "Time = {:7.3f} sec".format( simTime )
self.updateAxis()
def doAutoscale( self ):
if self.drawables_[0].dataWrapper_.numObj() == 0:
print( "Warning: No values to display in Moogli view ", self.title )
return
cmin = self.drawables_[0].dataWrapper_.coordMin_
cmax = self.drawables_[0].dataWrapper_.coordMax_
diamax = max( self.drawables_[0].dataWrapper_.getCoords()[:,6] )
v0 = vp.vector( cmin[0], cmin[1], cmin[2] )
v1 = vp.vector( cmax[0], cmax[1], cmax[2] )
#self.scene.camera.axis = self.scene.forward * vp.mag(v1 - v0) * 4
self.scene.center = (v0 + v1 ) / 2.0
self.scene.range = (diamax + vp.mag(v0 - v1 ) ) / 1.5
def moveView(self, event):
camAxis = self.scene.camera.axis
camDist = vp.mag(self.scene.center - self.scene.camera.pos)
dtheta = self.sensitivity
up = self.scene.up
if event.key in ["up", "k", "K"]:
self.scene.camera.pos -= up.norm() * dtheta * camDist
return
if event.key in ["down", "j", "J"]:
self.scene.camera.pos += up.norm() * dtheta * camDist
return
if event.key in ["right", "l", "L"]:
self.scene.camera.pos += vp.norm(up.cross(camAxis)) * dtheta * camDist
return
if event.key in ["left", "h", "H"]:
self.scene.camera.pos -= vp.norm(up.cross(camAxis)) * dtheta * camDist
return
if event.key in [".", ">"]: # Get closer, by ratio
ctr = self.scene.center
self.scene.camera.pos = ctr - camAxis/( 1+dtheta )
self.scene.camera.axis = ctr - self.scene.camera.pos
return
if event.key in [",", "<"]: # Get further
ctr = self.scene.center
self.scene.camera.pos = ctr - camAxis*( 1+dtheta )
self.scene.camera.axis = ctr - self.scene.camera.pos
return
if event.key == "p": # pitch: Rotate camera around ctr-horiz axis
self.scene.forward = vp.rotate( self.scene.forward, angle = dtheta, axis = vp.cross( self.scene.forward, self.scene.up ) )
return
if event.key == "P":
self.scene.forward = vp.rotate( self.scene.forward, angle = -dtheta, axis = vp.cross( self.scene.forward, self.scene.up ) )
return
if event.key == "y": # yaw: Rotate camera around ctr - up axis.
self.scene.forward = vp.rotate( self.scene.forward, angle = dtheta, axis = self.scene.up )
return
if event.key == "Y":
self.scene.forward = vp.rotate( self.scene.forward, angle = -dtheta, axis = self.scene.up )
return
if event.key == "r": # Roll, that is, change the 'up' vector
self.scene.camera.rotate( angle = dtheta, axis = camAxis, origin = self.scene.camera.pos )
return
if event.key == "R":
self.scene.camera.rotate( angle = -dtheta, axis = camAxis, origin = self.scene.camera.pos )
return
if event.key == "d": # Diameter scaling down
for dbl in self.drawables_:
dbl.diaScale *= 1.0 - self.sensitivity * 4
dbl.updateDiameter()
return
if event.key == "D":
for dbl in self.drawables_:
dbl.diaScale *= 1.0 + self.sensitivity * 4
dbl.updateDiameter()
return
if event.key == "s": # Scale down sleep time, make it faster.
self.sleep *= 1 - self.sensitivity
return
if event.key == "S": # Scale up sleep time, make it slower.
self.sleep *= 1 + self.sensitivity
return
if event.key == "a": # autoscale to fill view.
self.doAutoscale()
return
if event.key == "g":
self.hideAxis = not self.hideAxis
# show/hide the axis here.
if event.key == "t": # Turn on/off twisting/autorotate
self.doRotation = not self.doRotation
if event.key == "?": # Print out help for these commands
self.printMoogulHelp()
def printMoogulHelp( self ):
print( '''
Key bindings for Moogul:
Up or k: pan object up
Down or j: pan object down
left or h: pan object left.
right or l: pan object right
. or >: Zoom in: make object appear bigger
, or <: Zoom out: make object appear smaller
a: Autoscale to fill view
p: Pitch down
P: Pitch up
y: Yaw counterclockwise
    Y: Yaw clockwise
d: diminish diameter
D: Distend diameter.
g: Toggle visibility of grid
t: Toggle turn (rotation along long axis of cell)
?: Print this help page.
''')
#####################################################################
def list2vec( arg ):
return vp.vector( arg[0], arg[1], arg[2] )
class DataWrapper:
''' Class for interfacing between moogli and the data source. Currently
implemented for MOOSE and for nsdf reader.
'''
def __init__( self, field ):
self.coordMin_ = np.zeros( 3 )
self.coordMax_ = np.ones( 3 )
self.field_ = field
self.objList_ = []
def getValues( self ):
return np.zeros( 1 )
def numObj( self ):
return len( self.objList_ )
def getCoords( self ):
return np.array( [] )
def getMinMax( self ):
nmin = np.amin(self.coords_, axis = 0)
self.coordMin_ = np.amin( np.array( [nmin[0:3], nmin[3:6]] ), axis = 0 )
nmax = np.amax(self.coords_, axis = 0)
self.coordMax_ = np.amax( np.array( [nmax[0:3], nmax[3:6]] ), axis = 0 )
def objPathFromIndex( self, idx ):
if idx < len( self.objList_ ):
return self.objList_[idx].path
return None
def advance( self, simTime ):
# Checks that the simTime has crossed upcomingTime
return True # used for multi timestep cases.
def getHistory( self, path, field ):
# stub function. Derived classes fill it in and return useful values
return [0, 1, 2, 3], [ 1, 4, 9, 16]
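# Hypothetical sketch (added for illustration, not part of the original file):
# a derived wrapper only has to override the stubs above. The class name and
# the 'history' argument below are invented for this example.
class _ExampleHistoryWrapper( DataWrapper ):
    ''' Returns a canned per-object history; useful as a template for real
    data sources such as simulation output readers.'''
    def __init__( self, field, history ):
        DataWrapper.__init__( self, field )
        self.history_ = history    # dict mapping object path -> (times, values)
    def getHistory( self, path, field ):
        t, v = self.history_.get( path, ( [], [] ) )
        return t, v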
class MooDrawable:
''' Base class for drawing things'''
def __init__( self,
dataWrapper,
colormap,
lenScale,
diaScale,
fieldScale,
autoscale,
valMin, valMax
):
self.dataWrapper_ = dataWrapper
self.lenScale = lenScale
self.diaScale = diaScale
self.fieldScale = fieldScale
self.colormap = colormap
self.autoscale = autoscale
self.valMin = valMin
self.valMax = valMax
self.segments = []
self.snapshot = []
#cmap = plt.get_cmap( self.colormap, lut = NUM_CMAP )
#self.rgb = [ list2vec(cmap(i)[0:3]) for i in range( NUM_CMAP ) ]
def updateValues( self, simTime ):
if self.dataWrapper_.advance( simTime ):
self.val = self.dataWrapper_.getValues() * self.fieldScale
else:
return
if self.autoscale:
valMin = min( self.val )
valMax = max( self.val )
else:
valMin = self.valMin
valMax = self.valMax
scaleVal = NUM_CMAP * (self.val - valMin) / (valMax - valMin)
#indices = scaleVal.ndarray.astype( int )
indices = np.maximum( np.minimum( scaleVal, NUM_CMAP-0.5), 0.0).astype(int)
# Have to figure how this will work with multiple update rates.
self.snapshot.append( [simTime, indices] )
self.displayValues( indices )
def displayValues( self, indices ):
for idx, seg in zip( indices, self.segments ):
seg.color = self.rgb[ idx]
#seg.radius = self.diaScale * self.activeDia[idx]
def replaySnapshot( self, idx ):
if idx >= len( self.snapshot ):
return 0.0
self.displayValues( self.snapshot[idx][1] )
return self.snapshot[idx][0] # return frame time
def updateDiameter( self ):
dia = self.dataWrapper_.getCoords()[:,6]
for s, w in zip( self.segments, dia ):
s.radius = self.diaScale * w / 2.0
def cylinderDraw( self, _scene ):
for idx, coord in enumerate( self.dataWrapper_.getCoords() ):
v0 = list2vec( coord[0:3] )
v1 = list2vec( coord[3:6] )
radius = self.diaScale * coord[6] / 2.0
opacity = self.opacity[idx]
rod = vp.cylinder( canvas = _scene, pos = v0, axis = v1 - v0, radius = radius, opacity = opacity )
self.segments.append( rod )
def findDisplayObject( self, obj ):
try:
idx = self.segments.index( obj )
return self.dataWrapper_.objPathFromIndex( idx ), self.dataWrapper_.field_
except ValueError:
return None
def plotHistory( self, path, field, graph, plot ):
t, v = self.dataWrapper_.getHistory( path, field )
if len( t ) == 0:
print( "No data history for '", path, ".", field )
return
#self.graph = vp.graph( title = path + "." + field, xtitle = "Time (s)", ytitle = field + " Units here", width = 800, fast=False, pos=self.colorbar.caption_anchor )
graph.title = path + "." + field
dat = [[x,y] for x, y in zip( t, v ) ]
plot.data = dat
#print (dat)
#print( "IN plotHistory, ", len( dat), len( v ) )
#plot.data = [[x,y] for x, y in zip( t, v ) ]
#plot.data = [[x,sin(x)] for x in range( 0.0, 10.0, 0.1 ) ]
'''
fig = plt.figure( 1 )
plt.ion()
plt.title( path + "." + field )
plt.xlabel( "Time (s)" )
plt.ylabel( field + " um, units?" )
plt.plot( t, v )
plt.show( block = False )
fig.canvas.draw()
'''
#####################################################################
class MooNeuron( MooDrawable ):
''' Draws collection of line segments of defined dia and color'''
def __init__( self,
dataWrapper,
field = 'Vm',
colormap = 'jet',
lenScale = 1.0, diaScale = 1.0, fieldScale = 1.0,
autoscale = False,
valMin = -0.1, valMax = 0.05,
):
#self.isFieldOnCompt =
#field in ( 'Vm', 'Im', 'Rm', 'Cm', 'Ra', 'inject', 'diameter' )
MooDrawable.__init__( self, dataWrapper,
colormap = colormap, lenScale = lenScale,
diaScale = diaScale, fieldScale = fieldScale,
autoscale = autoscale,
valMin = valMin, valMax = valMax )
self.opacity = np.ones( dataWrapper.numObj() ) * 0.5
def drawForTheFirstTime( self, _scene ):
self.cylinderDraw( _scene )
#####################################################################
class MooReacSystem( MooDrawable ):
    ''' Draws the compartments of a chemical reaction system as cylinders, cones or spheres, depending on the mesh type '''
def __init__( self,
dataWrapper,
colormap = 'jet',
lenScale = 1e0, diaScale = 1.0, fieldScale = 1.0,
autoscale = False,
valMin = 0.0, valMax = 1.0
):
MooDrawable.__init__( self, dataWrapper,
colormap = colormap, lenScale = lenScale,
diaScale = diaScale, fieldScale = fieldScale,
autoscale = autoscale,
valMin = valMin, valMax = valMax )
self.opacity = np.ones( dataWrapper.numObj() )
def drawForTheFirstTime( self, _scene ):
if self.dataWrapper_.numObj() == 0:
return
mt = self.dataWrapper_.meshType()
        if mt in ["NeuroMesh", "CylMesh", "PsdMesh"]:
self.cylinderDraw( _scene )
elif mt == "SpineMesh":
self.spineDraw( _scene )
elif mt == "PresynMesh":
self.presynDraw( _scene )
elif mt == "EndoMesh":
self.endoDraw( _scene )
def spineDraw( self, _scene ):
# Spine entry has head[3], shaft[3], root[3], dia.
for idx, coord in enumerate( self.dataWrapper_.getCoords() ):
v0 = list2vec( coord[0:3] )
v1 = list2vec( coord[3:6] )
radius = self.diaScale * coord[6] / 2.0
opacity = self.opacity[idx]
rod = vp.cylinder( canvas = _scene, pos = v0, axis = v1 - v0, radius = radius, opacity = opacity )
self.segments.append( rod )
def presynDraw( self, _scene ):
for idx, coord in enumerate( self.dataWrapper_.getCoords() ):
v0 = list2vec( coord[0:3] )
v1 = list2vec( coord[3:6] )
radius = self.diaScale * coord[6] / 2.0
opacity = self.opacity[idx]
cone = vp.cone( canvas = _scene, pos = v0, axis = v0 - v1, radius = radius, opacity = opacity )
self.segments.append( cone )
def endoDraw( self, _scene ):
for idx, coord in enumerate( self.dataWrapper_.getCoords() ):
v0 = list2vec( coord[0:3] )
v1 = list2vec( coord[3:6] )
radius = self.diaScale * coord[6] / 2.0
opacity = self.opacity[idx]
sphere = vp.sphere( canvas = _scene, pos = (v0 + v1)/2.0, radius = radius, opacity = opacity )
self.segments.append( sphere )
|
BhallaLab/moose-core
|
python/rdesigneur/moogul.py
|
Python
|
gpl-3.0
| 25,407
|
[
"MOOSE"
] |
7ad2bf32fd36f87066e4cd6937ba4a13b9fcae005f2da14fcfd1bc2325e6ef54
|
import numpy as np
import numpy.random as rng
from Gaussian import Gaussian
class Track:
"""
A trajectory in space
"""
def __init__(self, num=10, ndim=6):
"""
num = number of points in the track
ndim = dimensionality of the space
"""
self.num, self.ndim = num, ndim
self.pos = np.empty((num, ndim))
def from_prior(self, L=5.):
"""
Generate a track from a standard AR(1) prior
"""
alpha = np.exp(-1./L)
beta = np.sqrt(1. - alpha**2)
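        # With alpha = exp(-1/L) and beta = sqrt(1 - alpha**2), the recursion below
        # keeps a unit marginal variance, so L acts as the correlation length of the
        # track (in steps): corr(pos[i], pos[i+k]) = exp(-k/L).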
self.pos[0, :] = rng.randn(self.ndim)
for i in range(1, self.num):
self.pos[i, :] = alpha*self.pos[i-1, :] + beta*rng.randn(self.ndim)
def evaluate_mog(self, x, y):
"""
Make a mixture of gaussians from this track
and evaluate it
"""
f = np.zeros(x.shape)
params = self.pos.copy()
params[:,2] = np.exp(params[:,2])
params[:,3] = np.exp(params[:,3])
params[:,4] = np.exp(0.5*params[:,4])
for i in range(self.num):
gaussian = Gaussian(params=params[i, :])
f += gaussian.evaluate(x, y)
return f
if __name__ == '__main__':
import matplotlib.pyplot as plt
track = Track(100)
track.from_prior(L=20.)
plt.plot(track.pos[:,0])
plt.show()
# Set up cartesian coordinate grid
x = np.linspace(-5., 5., 1001)
[x, y] = np.meshgrid(x, x[::-1])
# Make a gaussian
gaussian = Gaussian()
f = track.evaluate_mog(x, y)
plt.imshow(f, interpolation='nearest')
plt.show()
|
eggplantbren/MogTrack
|
code/python/Track.py
|
Python
|
mit
| 1,365
|
[
"Gaussian"
] |
e4ad96cc77d09cb00655a410006540c8e7dfac2d8561b1fb439fb60d4f32e97c
|
# Load the list of available alignments when the tool is initialized
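# Each relevant line of alignseq.loc is expected to look like
#   align <dbkey> <path>
# e.g. (an illustrative example only, not taken from a real .loc file):
#   align hg17 /depot/data2/galaxy/hg17/align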
aligndb = dict()
for line in open( "/depot/data2/galaxy/alignseq.loc" ):
fields = line.split()
if fields[0] == "align":
        aligndb.setdefault( fields[1], [] ).append( fields[2] )
def get_available_alignments_for_build( build ):
# FIXME: We need a database of descriptive names corresponding to dbkeys.
# We need to resolve the musMusX <--> mmX confusion
rval = []
if build[0:2] == "mm":
build = build.replace('mm','musMus')
if build[0:2] == "rn":
build = build.replace('rn','ratNor')
if build in aligndb:
for val in aligndb[build]:
rval.append( ( val, val, False ) )
return rval
|
jmchilton/galaxy-central
|
tools/extract/extractAxt_wrapper_code.py
|
Python
|
mit
| 780
|
[
"Galaxy"
] |
f3f909f3d1c4fcf5783e40ad1552ffbfa454e5ffbbab56d51325648370228e5a
|
""" ..mod: FTSRequest
=================
Helper class to perform FTS job submission and monitoring.
"""
# # imports
import sys
import re
import time
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import checkGuid
from DIRAC.Core.Utilities.Adler import compareAdler, intAdlerToHex, hexAdlerToInt
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
from DIRAC.Core.Utilities.Time import dateTime
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
# # RCSID
__RCSID__ = "$Id$"
class FTSRequest( object ):
"""
.. class:: FTSRequest
Helper class for FTS job submission and monitoring.
"""
# # default checksum type
__defaultCksmType = "ADLER32"
  # # flag to disable/enable checksum test, default: disabled
__cksmTest = False
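  # # minimal, illustrative call sequence based on the methods defined below
  # # (the SE names and the LFN are placeholders, not real configuration values):
  # #   request = FTSRequest()
  # #   request.setSourceSE( "SOURCE-SE" )
  # #   request.setTargetSE( "TARGET-SE" )
  # #   request.setLFN( "/some/lfn" )
  # #   request.submit( monitor = True )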
def __init__( self ):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
# # final states tuple
self.finalStates = ( 'Canceled', 'Failed', 'Hold',
'Finished', 'FinishedDirty' )
# # failed states tuple
self.failedStates = ( 'Canceled', 'Failed',
'Hold', 'FinishedDirty' )
# # successful states tuple
self.successfulStates = ( 'Finished', 'Done' )
# # all file states tuple
self.fileStates = ( 'Done', 'Active', 'Pending', 'Ready', 'Canceled', 'Failed',
'Finishing', 'Finished', 'Submitted', 'Hold', 'Waiting' )
self.statusSummary = {}
# # request status
self.requestStatus = 'Unknown'
# # dict for FTS job files
self.fileDict = {}
# # dict for replicas information
self.catalogReplicas = {}
# # dict for metadata information
self.catalogMetadata = {}
# # dict for files that failed to register
self.failedRegistrations = {}
    # # placeholder for FileCatalog reference
self.oCatalog = None
# # submit timestamp
self.submitTime = ''
# # placeholder FTS job GUID
self.ftsGUID = ''
# # placeholder for FTS server URL
self.ftsServer = ''
    # # flag marking FTS job completeness
self.isTerminal = False
    # # completeness percentage
self.percentageComplete = 0.0
# # source SE name
self.sourceSE = ''
# # flag marking source SE validity
self.sourceValid = False
# # source space token
self.sourceToken = ''
# # target SE name
self.targetSE = ''
# # flag marking target SE validity
self.targetValid = False
# # target space token
self.targetToken = ''
# # placeholder for target StorageElement
self.oTargetSE = None
# # placeholder for source StorageElement
self.oSourceSE = None
# # checksum type, set it to default
self.__cksmType = self.__defaultCksmType
# # disable checksum test by default
self.__cksmTest = False
# # statuses that prevent submitting to FTS
self.noSubmitStatus = ( 'Failed', 'Done', 'Staging' )
# # were sources resolved?
self.sourceResolved = False
# # Number of file transfers actually submitted
self.submittedFiles = 0
self.transferTime = 0
self.submitCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/SubmitCommand', 'glite-transfer-submit' )
self.monitorCommand = Operations().getValue( 'DataManagement/FTSPlacement/FTS2/MonitorCommand', 'glite-transfer-status' )
self.ftsVersion = Operations().getValue( 'DataManagement/FTSVersion', 'FTS2' )
self.ftsJob = None
self.ftsFiles = []
####################################################################
#
# Methods for setting/getting/checking the SEs
#
def setSourceSE( self, se ):
""" set SE for source
:param self: self reference
:param str se: source SE name
"""
if se == self.targetSE:
return S_ERROR( "SourceSE is TargetSE" )
self.sourceSE = se
self.oSourceSE = StorageElement( self.sourceSE )
return self.__checkSourceSE()
def __checkSourceSE( self ):
""" check source SE availability
:param self: self reference
"""
if not self.sourceSE:
return S_ERROR( "SourceSE not set" )
res = self.oSourceSE.isValid( 'Read' )
if not res['OK']:
return S_ERROR( "SourceSE not available for reading" )
res = self.__getSESpaceToken( self.oSourceSE )
if not res['OK']:
self.log.error( "FTSRequest failed to get SRM Space Token for SourceSE", res['Message'] )
return S_ERROR( "SourceSE does not support FTS transfers" )
if self.__cksmTest:
res = self.oSourceSE.getChecksumType()
if not res["OK"]:
self.log.error( "Unable to get checksum type for SourceSE",
"%s: %s" % ( self.sourceSE, res["Message"] ) )
cksmType = res["Value"]
if cksmType in ( "NONE", "NULL" ):
self.log.warn( "Checksum type set to %s at SourceSE %s, disabling checksum test" % ( cksmType,
self.sourceSE ) )
self.__cksmTest = False
elif cksmType != self.__cksmType:
self.log.warn( "Checksum type mismatch, disabling checksum test" )
self.__cksmTest = False
self.sourceToken = res['Value']
self.sourceValid = True
return S_OK()
def setTargetSE( self, se ):
""" set target SE
:param self: self reference
:param str se: target SE name
"""
if se == self.sourceSE:
return S_ERROR( "TargetSE is SourceSE" )
self.targetSE = se
self.oTargetSE = StorageElement( self.targetSE )
return self.__checkTargetSE()
def setTargetToken( self, token ):
""" target space token setter
:param self: self reference
:param str token: target space token
"""
self.targetToken = token
return S_OK()
def __checkTargetSE( self ):
""" check target SE availability
:param self: self reference
"""
if not self.targetSE:
return S_ERROR( "TargetSE not set" )
res = self.oTargetSE.isValid( 'Write' )
if not res['OK']:
return S_ERROR( "TargetSE not available for writing" )
res = self.__getSESpaceToken( self.oTargetSE )
if not res['OK']:
self.log.error( "FTSRequest failed to get SRM Space Token for TargetSE", res['Message'] )
return S_ERROR( "TargetSE does not support FTS transfers" )
# # check checksum types
if self.__cksmTest:
res = self.oTargetSE.getChecksumType()
if not res["OK"]:
self.log.error( "Unable to get checksum type for TargetSE",
"%s: %s" % ( self.targetSE, res["Message"] ) )
cksmType = res["Value"]
if cksmType in ( "NONE", "NULL" ):
self.log.warn( "Checksum type set to %s at TargetSE %s, disabling checksum test" % ( cksmType,
self.targetSE ) )
self.__cksmTest = False
elif cksmType != self.__cksmType:
self.log.warn( "Checksum type mismatch, disabling checksum test" )
self.__cksmTest = False
self.targetToken = res['Value']
self.targetValid = True
return S_OK()
@staticmethod
def __getSESpaceToken( oSE ):
""" get space token from StorageElement instance
:param self: self reference
:param StorageElement oSE: StorageElement instance
"""
res = oSE.getStorageParameters( "SRM2" )
if not res['OK']:
return res
return S_OK( res['Value'].get( 'SpaceToken' ) )
####################################################################
#
# Methods for setting/getting FTS request parameters
#
def setFTSGUID( self, guid ):
""" FTS job GUID setter
:param self: self reference
    :param str guid: string containing GUID
"""
if not checkGuid( guid ):
return S_ERROR( "Incorrect GUID format" )
self.ftsGUID = guid
return S_OK()
def setFTSServer( self, server ):
""" FTS server setter
:param self: self reference
:param str server: FTS server URL
"""
self.ftsServer = server
return S_OK()
def isRequestTerminal( self ):
""" check if FTS job has terminated
:param self: self reference
"""
if self.requestStatus in self.finalStates:
self.isTerminal = True
return S_OK( self.isTerminal )
def setCksmTest( self, cksmTest = False ):
""" set cksm test
:param self: self reference
:param bool cksmTest: flag to enable/disable checksum test
"""
self.__cksmTest = bool( cksmTest )
return S_OK( self.__cksmTest )
####################################################################
#
# Methods for setting/getting/checking files and their metadata
#
def setLFN( self, lfn ):
""" add LFN :lfn: to :fileDict:
:param self: self reference
    :param str lfn: LFN to add to :fileDict:
"""
self.fileDict.setdefault( lfn, {'Status':'Waiting'} )
return S_OK()
def setSourceSURL( self, lfn, surl ):
""" source SURL setter
:param self: self reference
:param str lfn: LFN
:param str surl: source SURL
"""
target = self.fileDict[lfn].get( 'Target' )
if target == surl:
return S_ERROR( "Source and target the same" )
return self.__setFileParameter( lfn, 'Source', surl )
def getSourceSURL( self, lfn ):
""" get source SURL for LFN :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Source' )
def setTargetSURL( self, lfn, surl ):
""" set target SURL for LFN :lfn:
:param self: self reference
:param str lfn: LFN
:param str surl: target SURL
"""
source = self.fileDict[lfn].get( 'Source' )
if source == surl:
return S_ERROR( "Source and target the same" )
return self.__setFileParameter( lfn, 'Target', surl )
def getFailReason( self, lfn ):
""" get fail reason for file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Reason' )
  def getRetries( self, lfn ):
    """ get number of attempts made to transfer file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Retries' )
def getTransferTime( self, lfn ):
""" get duration of transfer for file :lfn:
:param self: self reference
:param str lfn: LFN
"""
return self.__getFileParameter( lfn, 'Duration' )
def getFailed( self ):
""" get list of wrongly transferred LFNs
:param self: self reference
"""
return S_OK( [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) in self.failedStates ] )
def getStaging( self ):
""" get files set for prestaging """
return S_OK( [lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) == 'Staging'] )
  def getDone( self ):
    """ get list of successfully transferred LFNs
:param self: self reference
"""
return S_OK( [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status', '' ) in self.successfulStates ] )
def __setFileParameter( self, lfn, paramName, paramValue ):
""" set :paramName: to :paramValue: for :lfn: file
:param self: self reference
:param str lfn: LFN
:param str paramName: parameter name
:param mixed paramValue: a new parameter value
"""
self.setLFN( lfn )
self.fileDict[lfn][paramName] = paramValue
return S_OK()
def __getFileParameter( self, lfn, paramName ):
""" get value of :paramName: for file :lfn:
:param self: self reference
:param str lfn: LFN
:param str paramName: parameter name
"""
if lfn not in self.fileDict:
return S_ERROR( "Supplied file not set" )
if paramName not in self.fileDict[lfn]:
return S_ERROR( "%s not set for file" % paramName )
return S_OK( self.fileDict[lfn][paramName] )
####################################################################
#
# Methods for submission
#
def submit( self, monitor = False, printOutput = True ):
""" submit FTS job
:param self: self reference
:param bool monitor: flag to monitor progress of FTS job
:param bool printOutput: flag to print output of execution to stdout
"""
res = self.__prepareForSubmission()
if not res['OK']:
return res
res = self.__submitFTSTransfer()
if not res['OK']:
return res
resDict = { 'ftsGUID' : self.ftsGUID, 'ftsServer' : self.ftsServer, 'submittedFiles' : self.submittedFiles }
if monitor or printOutput:
gLogger.always( "Submitted %s@%s" % ( self.ftsGUID, self.ftsServer ) )
if monitor:
self.monitor( untilTerminal = True, printOutput = printOutput, full = False )
return S_OK( resDict )
def __prepareForSubmission( self ):
""" check validity of job before submission
:param self: self reference
"""
if not self.fileDict:
return S_ERROR( "No files set" )
if not self.sourceValid:
return S_ERROR( "SourceSE not valid" )
if not self.targetValid:
return S_ERROR( "TargetSE not valid" )
if not self.ftsServer:
res = self.__resolveFTSServer()
if not res['OK']:
return S_ERROR( "FTSServer not valid" )
self.resolveSource()
self.resolveTarget()
res = self.__filesToSubmit()
if not res['OK']:
return S_ERROR( "No files to submit" )
return S_OK()
def __getCatalogObject( self ):
""" CatalogInterface instance facade
:param self: self reference
"""
try:
if not self.oCatalog:
self.oCatalog = FileCatalog()
return S_OK()
except:
return S_ERROR()
def __updateReplicaCache( self, lfns = None, overwrite = False ):
""" update replica cache for list of :lfns:
:param self: self reference
:param mixed lfns: list of LFNs
:param bool overwrite: flag to trigger cache clearing and updating
"""
if not lfns:
lfns = self.fileDict.keys()
toUpdate = [ lfn for lfn in lfns if ( lfn not in self.catalogReplicas ) or overwrite ]
if not toUpdate:
return S_OK()
res = self.__getCatalogObject()
if not res['OK']:
return res
res = self.oCatalog.getReplicas( toUpdate )
if not res['OK']:
return S_ERROR( "Failed to update replica cache: %s" % res['Message'] )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
for lfn, replicas in res['Value']['Successful'].items():
self.catalogReplicas[lfn] = replicas
return S_OK()
def __updateMetadataCache( self, lfns = None ):
""" update metadata cache for list of LFNs
:param self: self reference
    :param list lfns: list of LFNs
"""
if not lfns:
lfns = self.fileDict.keys()
toUpdate = [ lfn for lfn in lfns if lfn not in self.catalogMetadata ]
if not toUpdate:
return S_OK()
res = self.__getCatalogObject()
if not res['OK']:
return res
res = self.oCatalog.getFileMetadata( toUpdate )
if not res['OK']:
return S_ERROR( "Failed to get source catalog metadata: %s" % res['Message'] )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
for lfn, metadata in res['Value']['Successful'].items():
self.catalogMetadata[lfn] = metadata
return S_OK()
def resolveSource( self ):
""" resolve source SE eligible for submission
:param self: self reference
"""
# Avoid resolving sources twice
if self.sourceResolved:
return S_OK()
# Only resolve files that need a transfer
toResolve = [ lfn for lfn in self.fileDict if self.fileDict[lfn].get( "Status", "" ) != "Failed" ]
if not toResolve:
return S_OK()
res = self.__updateMetadataCache( toResolve )
if not res['OK']:
return res
res = self.__updateReplicaCache( toResolve )
if not res['OK']:
return res
# Define the source URLs
for lfn in toResolve:
replicas = self.catalogReplicas.get( lfn, {} )
if self.sourceSE not in replicas:
        gLogger.warn( "resolveSource: skipping %s - no replica at SourceSE %s" % ( lfn, self.sourceSE ) )
self.__setFileParameter( lfn, 'Reason', "No replica at SourceSE" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = returnSingleResult( self.oSourceSE.getURL( lfn, protocol = 'srm' ) )
if not res['OK']:
gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = self.setSourceSURL( lfn, res['Value'] )
if not res['OK']:
gLogger.warn( "resolveSource: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
toResolve = []
for lfn in self.fileDict:
if "Source" in self.fileDict[lfn]:
toResolve.append( lfn )
if not toResolve:
return S_ERROR( "No eligible Source files" )
    # Get metadata of the sources, to check for existence, availability and caching
res = self.oSourceSE.getFileMetadata( toResolve )
if not res['OK']:
return S_ERROR( "Failed to check source file metadata" )
for lfn, error in res['Value']['Failed'].items():
if re.search( 'File does not exist', error ):
        gLogger.warn( "resolveSource: skipping %s - source file does not exist" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file does not exist" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
gLogger.warn( "resolveSource: skipping %s - failed to get source metadata" % lfn )
self.__setFileParameter( lfn, 'Reason', "Failed to get Source metadata" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
toStage = []
nbStagedFiles = 0
for lfn, metadata in res['Value']['Successful'].items():
lfnStatus = self.fileDict.get( lfn, {} ).get( 'Status' )
if metadata['Unavailable']:
gLogger.warn( "resolveSource: skipping %s - source file unavailable" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file Unavailable" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif metadata['Lost']:
gLogger.warn( "resolveSource: skipping %s - source file lost" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source file Lost" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif not metadata['Cached']:
if lfnStatus != 'Staging':
toStage.append( lfn )
elif metadata['Size'] != self.catalogMetadata[lfn]['Size']:
gLogger.warn( "resolveSource: skipping %s - source file size mismatch" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source size mismatch" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif self.catalogMetadata[lfn]['Checksum'] and metadata['Checksum'] and \
not compareAdler( metadata['Checksum'], self.catalogMetadata[lfn]['Checksum'] ):
gLogger.warn( "resolveSource: skipping %s - source file checksum mismatch" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source checksum mismatch" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif lfnStatus == 'Staging':
# file that was staging is now cached
self.__setFileParameter( lfn, 'Status', 'Waiting' )
nbStagedFiles += 1
# Some files were being staged
if nbStagedFiles:
self.log.info( 'resolveSource: %d files have been staged' % nbStagedFiles )
# Launching staging of files not in cache
if toStage:
gLogger.warn( "resolveSource: %s source files not cached, prestaging..." % len( toStage ) )
stage = self.oSourceSE.prestageFile( toStage )
if not stage["OK"]:
        gLogger.error( "resolveSource: error in prestaging", stage["Message"] )
for lfn in toStage:
self.__setFileParameter( lfn, 'Reason', stage["Message"] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
for lfn in toStage:
if lfn in stage['Value']['Successful']:
self.__setFileParameter( lfn, 'Status', 'Staging' )
elif lfn in stage['Value']['Failed']:
self.__setFileParameter( lfn, 'Reason', stage['Value']['Failed'][lfn] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
self.sourceResolved = True
return S_OK()
def resolveTarget( self ):
""" find target SE eligible for submission
:param self: self reference
"""
toResolve = [ lfn for lfn in self.fileDict
if self.fileDict[lfn].get( 'Status' ) not in self.noSubmitStatus ]
if not toResolve:
return S_OK()
res = self.__updateReplicaCache( toResolve )
if not res['OK']:
return res
for lfn in toResolve:
res = returnSingleResult( self.oTargetSE.getURL( lfn, protocol = 'srm' ) )
if not res['OK']:
reason = res.get( 'Message', res['Message'] )
gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, reason ) )
self.__setFileParameter( lfn, 'Reason', reason )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
res = self.setTargetSURL( lfn, res['Value'] )
if not res['OK']:
gLogger.warn( "resolveTarget: skipping %s - %s" % ( lfn, res["Message"] ) )
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
continue
toResolve = []
for lfn in self.fileDict:
if "Target" in self.fileDict[lfn]:
toResolve.append( lfn )
if not toResolve:
return S_ERROR( "No eligible Target files" )
res = self.oTargetSE.exists( toResolve )
if not res['OK']:
return S_ERROR( "Failed to check target existence" )
for lfn, error in res['Value']['Failed'].items():
self.__setFileParameter( lfn, 'Reason', error )
self.__setFileParameter( lfn, 'Status', 'Failed' )
toRemove = []
for lfn, exists in res['Value']['Successful'].items():
if exists:
res = self.getSourceSURL( lfn )
if not res['OK']:
gLogger.warn( "resolveTarget: skipping %s - target exists" % lfn )
self.__setFileParameter( lfn, 'Reason', "Target exists" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
elif res['Value'] == self.fileDict[lfn]['Target']:
gLogger.warn( "resolveTarget: skipping %s - source and target pfns are the same" % lfn )
self.__setFileParameter( lfn, 'Reason', "Source and Target the same" )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
toRemove.append( lfn )
if toRemove:
self.oTargetSE.removeFile( toRemove )
return S_OK()
def __filesToSubmit( self ):
"""
check if there is at least one file to submit
:return: S_OK if at least one file is present, S_ERROR otherwise
"""
for lfn in self.fileDict:
lfnStatus = self.fileDict[lfn].get( 'Status' )
source = self.fileDict[lfn].get( 'Source' )
target = self.fileDict[lfn].get( 'Target' )
if lfnStatus not in self.noSubmitStatus and source and target:
return S_OK()
return S_ERROR()
def __createFTSFiles( self ):
""" create LFNs file for glite-transfer-submit command
    This file consists of one line for each file to be transferred:
sourceSURL targetSURL [CHECKSUMTYPE:CHECKSUM]
:param self: self reference
"""
self.__updateMetadataCache()
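    # # an illustrative example of one such line (placeholder SURLs and checksum):
    # #   srm://source.example.org/some/file srm://target.example.org/some/file ADLER32:0a1b2c3d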
for lfn in self.fileDict:
lfnStatus = self.fileDict[lfn].get( 'Status' )
if lfnStatus not in self.noSubmitStatus:
cksmStr = ""
        # # add cksmType:cksm only if cksmType is specified, else let FTS decide by itself
if self.__cksmTest and self.__cksmType:
checkSum = self.catalogMetadata.get( lfn, {} ).get( 'Checksum' )
if checkSum:
cksmStr = " %s:%s" % ( self.__cksmType, intAdlerToHex( hexAdlerToInt( checkSum ) ) )
ftsFile = FTSFile()
ftsFile.LFN = lfn
ftsFile.SourceSURL = self.fileDict[lfn].get( 'Source' )
ftsFile.TargetSURL = self.fileDict[lfn].get( 'Target' )
ftsFile.SourceSE = self.sourceSE
ftsFile.TargetSE = self.targetSE
ftsFile.Status = self.fileDict[lfn].get( 'Status' )
ftsFile.Checksum = cksmStr
ftsFile.Size = self.catalogMetadata.get( lfn, {} ).get( 'Size' )
self.ftsFiles.append( ftsFile )
self.submittedFiles += 1
return S_OK()
def __createFTSJob( self, guid = None ):
self.__createFTSFiles()
ftsJob = FTSJob()
ftsJob.RequestID = 0
ftsJob.OperationID = 0
ftsJob.SourceSE = self.sourceSE
ftsJob.TargetSE = self.targetSE
ftsJob.SourceToken = self.sourceToken
ftsJob.TargetToken = self.targetToken
ftsJob.FTSServer = self.ftsServer
if guid:
ftsJob.FTSGUID = guid
for ftsFile in self.ftsFiles:
ftsFile.Attempt += 1
ftsFile.Error = ""
ftsJob.addFile( ftsFile )
self.ftsJob = ftsJob
def __submitFTSTransfer( self ):
""" create and execute glite-transfer-submit CLI command
:param self: self reference
"""
log = gLogger.getSubLogger( 'Submit' )
self.__createFTSJob()
submit = self.ftsJob.submitFTS( self.ftsVersion, command = self.submitCommand )
if not submit["OK"]:
log.error( "unable to submit FTSJob: %s" % submit["Message"] )
return submit
log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )
# # update statuses for job files
for ftsFile in self.ftsJob:
ftsFile.FTSGUID = self.ftsJob.FTSGUID
ftsFile.Status = "Submitted"
ftsFile.Attempt += 1
log.info( "FTSJob '%s'@'%s' has been submitted" % ( self.ftsJob.FTSGUID, self.ftsJob.FTSServer ) )
self.ftsGUID = self.ftsJob.FTSGUID
return S_OK()
def __resolveFTSServer( self ):
"""
resolve FTS server to use, it should be the closest one from target SE
:param self: self reference
"""
if self.ftsVersion.upper() == 'FTS2':
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS2ServersForSites
if not self.targetSE:
return S_ERROR( "Target SE not set" )
res = getSitesForSE( self.targetSE )
if not res['OK'] or not res['Value']:
return S_ERROR( "Could not determine target site" )
targetSites = res['Value']
targetSite = ''
for targetSite in targetSites:
targetFTS = getFTS2ServersForSites( [targetSite] )
if targetFTS['OK']:
ftsTarget = targetFTS['Value'][targetSite]
if ftsTarget:
self.ftsServer = ftsTarget
return S_OK( self.ftsServer )
else:
return targetFTS
elif self.ftsVersion.upper() == 'FTS3':
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers
res = getFTS3Servers()
if not res['OK']:
return res
ftsServerList = res['Value']
if ftsServerList:
# Here we take the first one, regardless of the policy...
# Unclean but all this will disapear after refactoring the fts code
self.ftsServer = ftsServerList[0]
return S_OK( self.ftsServer )
else:
return S_ERROR( 'Unknown FTS version %s' % self.ftsVersion )
    return S_ERROR( 'No FTS server found for %s' % self.targetSE )
####################################################################
#
# Methods for monitoring
#
def summary( self, untilTerminal = False, printOutput = False ):
""" summary of FTS job
:param self: self reference
:param bool untilTerminal: flag to monitor FTS job to its final state
:param bool printOutput: flag to print out monitoring information to the stdout
"""
res = self.__isSummaryValid()
if not res['OK']:
return res
while not self.isTerminal:
res = self.__parseOutput( full = True )
if not res['OK']:
return res
if untilTerminal:
self.__print()
self.isRequestTerminal()
if res['Value'] or ( not untilTerminal ):
break
time.sleep( 1 )
if untilTerminal:
print ""
if printOutput and ( not untilTerminal ):
return self.dumpSummary( printOutput = printOutput )
return S_OK()
def monitor( self, untilTerminal = False, printOutput = False, full = True ):
""" monitor FTS job
:param self: self reference
:param bool untilTerminal: flag to monitor FTS job to its final state
:param bool printOutput: flag to print out monitoring information to the stdout
"""
if not self.ftsJob:
self.resolveSource()
self.__createFTSJob( self.ftsGUID )
res = self.__isSummaryValid()
if not res['OK']:
return res
if untilTerminal:
res = self.summary( untilTerminal = untilTerminal, printOutput = printOutput )
if not res['OK']:
return res
res = self.__parseOutput( full = full )
if not res['OK']:
return res
if untilTerminal:
self.finalize()
if printOutput:
self.dump()
return res
def dumpSummary( self, printOutput = False ):
""" get FTS job summary as str
:param self: self reference
:param bool printOutput: print summary to stdout
"""
outStr = ''
for status in sorted( self.statusSummary ):
if self.statusSummary[status]:
outStr = '%s\t%-10s : %-10s\n' % ( outStr, status, str( self.statusSummary[status] ) )
outStr = outStr.rstrip( '\n' )
if printOutput:
print outStr
return S_OK( outStr )
def __print( self ):
""" print progress bar of FTS job completeness to stdout
:param self: self reference
"""
width = 100
bits = int( ( width * self.percentageComplete ) / 100 )
outStr = "|%s>%s| %.1f%s %s %s" % ( "="*bits, " "*( width - bits ),
self.percentageComplete, "%",
self.requestStatus, " "*10 )
sys.stdout.write( "%s\r" % ( outStr ) )
sys.stdout.flush()
def dump( self ):
""" print FTS job parameters and files to stdout
:param self: self reference
"""
print "%-10s : %-10s" % ( "Status", self.requestStatus )
print "%-10s : %-10s" % ( "Source", self.sourceSE )
print "%-10s : %-10s" % ( "Target", self.targetSE )
print "%-10s : %-128s" % ( "Server", self.ftsServer )
print "%-10s : %-128s" % ( "GUID", self.ftsGUID )
for lfn in sorted( self.fileDict ):
print "\n %-15s : %-128s" % ( 'LFN', lfn )
for key in ['Source', 'Target', 'Status', 'Reason', 'Duration']:
print " %-15s : %-128s" % ( key, str( self.fileDict[lfn].get( key ) ) )
return S_OK()
def __isSummaryValid( self ):
""" check validity of FTS job summary report
:param self: self reference
"""
if not self.ftsServer:
return S_ERROR( "FTSServer not set" )
if not self.ftsGUID:
return S_ERROR( "FTSGUID not set" )
return S_OK()
def __parseOutput( self, full = False ):
""" execute glite-transfer-status command and parse its output
:param self: self reference
    :param bool full: glite-transfer-status verbosity level; when set, collect information about individual files as well
"""
monitor = self.ftsJob.monitorFTS( self.ftsVersion, command = self.monitorCommand, full = full )
if not monitor['OK']:
return monitor
self.percentageComplete = self.ftsJob.Completeness
self.requestStatus = self.ftsJob.Status
self.submitTime = self.ftsJob.SubmitTime
statusSummary = monitor['Value']
if statusSummary:
for state in statusSummary:
self.statusSummary[state] = statusSummary[state]
self.transferTime = 0
for ftsFile in self.ftsJob:
lfn = ftsFile.LFN
self.__setFileParameter( lfn, 'Status', ftsFile.Status )
self.__setFileParameter( lfn, 'Reason', ftsFile.Error )
self.__setFileParameter( lfn, 'Duration', ftsFile._duration )
targetURL = self.__getFileParameter( lfn, 'Target' )
if not targetURL['OK']:
self.__setFileParameter( lfn, 'Target', ftsFile.TargetSURL )
sourceURL = self.__getFileParameter( lfn, 'Source' )
if not sourceURL['OK']:
self.__setFileParameter( lfn, 'Source', ftsFile.SourceSURL )
self.transferTime += int( ftsFile._duration )
return S_OK()
####################################################################
#
# Methods for finalization
#
def finalize( self ):
""" finalize FTS job
:param self: self reference
"""
self.__updateMetadataCache()
transEndTime = dateTime()
regStartTime = time.time()
res = self.getTransferStatistics()
transDict = res['Value']
res = self.__registerSuccessful( transDict['transLFNs'] )
regSuc, regTotal = res['Value']
regTime = time.time() - regStartTime
if self.sourceSE and self.targetSE:
self.__sendAccounting( regSuc, regTotal, regTime, transEndTime, transDict )
return S_OK()
def getTransferStatistics( self ):
""" collect information of Transfers that can be used by Accounting
:param self: self reference
"""
transDict = { 'transTotal': len( self.fileDict ),
'transLFNs': [],
'transOK': 0,
'transSize': 0 }
for lfn in self.fileDict:
if self.fileDict[lfn].get( 'Status' ) in self.successfulStates:
if self.fileDict[lfn].get( 'Duration', 0 ):
transDict['transLFNs'].append( lfn )
transDict['transOK'] += 1
if lfn in self.catalogMetadata:
transDict['transSize'] += self.catalogMetadata[lfn].get( 'Size', 0 )
return S_OK( transDict )
def getFailedRegistrations( self ):
""" get failed registrations dict
:param self: self reference
"""
return S_OK( self.failedRegistrations )
def __registerSuccessful( self, transLFNs ):
""" register successfully transferred files to the catalogs,
fill failedRegistrations dict for files that failed to register
:param self: self reference
:param list transLFNs: LFNs in FTS job
"""
self.failedRegistrations = {}
toRegister = {}
for lfn in transLFNs:
res = returnSingleResult( self.oTargetSE.getURL( self.fileDict[lfn].get( 'Target' ), protocol = 'srm' ) )
if not res['OK']:
self.__setFileParameter( lfn, 'Reason', res['Message'] )
self.__setFileParameter( lfn, 'Status', 'Failed' )
else:
toRegister[lfn] = { 'PFN' : res['Value'], 'SE' : self.targetSE }
if not toRegister:
return S_OK( ( 0, 0 ) )
res = self.__getCatalogObject()
if not res['OK']:
      self.failedRegistrations = toRegister
self.log.error( 'Failed to get Catalog Object', res['Message'] )
return S_OK( ( 0, len( toRegister ) ) )
res = self.oCatalog.addReplica( toRegister )
if not res['OK']:
self.failedRegistrations = toRegister
      self.log.error( 'Failed to register replicas in the catalog', res['Message'] )
return S_OK( ( 0, len( toRegister ) ) )
for lfn, error in res['Value']['Failed'].items():
self.failedRegistrations[lfn] = toRegister[lfn]
self.log.error( 'Registration of Replica failed', '%s : %s' % ( lfn, str( error ) ) )
return S_OK( ( len( res['Value']['Successful'] ), len( toRegister ) ) )
def __sendAccounting( self, regSuc, regTotal, regTime, transEndTime, transDict ):
""" send accounting record
:param self: self reference
:param regSuc: number of files successfully registered
    :param regTotal: number of files attempted to register
:param regTime: time stamp at the end of registration
:param transEndTime: time stamp at the end of FTS job
    :param dict transDict: dict holding counters for files being transferred, their sizes and successful transfers
"""
oAccounting = DataOperation()
oAccounting.setEndTime( transEndTime )
oAccounting.setStartTime( self.submitTime )
accountingDict = {}
accountingDict['OperationType'] = 'replicateAndRegister'
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'FTS' if 'fts3' not in self.ftsServer else 'FTS3'
accountingDict['RegistrationTime'] = regTime
accountingDict['RegistrationOK'] = regSuc
accountingDict['RegistrationTotal'] = regTotal
accountingDict['TransferOK'] = transDict['transOK']
accountingDict['TransferTotal'] = transDict['transTotal']
accountingDict['TransferSize'] = transDict['transSize']
accountingDict['FinalStatus'] = self.requestStatus
accountingDict['Source'] = self.sourceSE
accountingDict['Destination'] = self.targetSE
accountingDict['TransferTime'] = self.transferTime
oAccounting.setValuesFromDict( accountingDict )
self.log.verbose( "Attempting to commit accounting message..." )
oAccounting.commit()
self.log.verbose( "...committed." )
return S_OK()
|
vmendez/DIRAC
|
DataManagementSystem/Client/FTSRequest.py
|
Python
|
gpl-3.0
| 37,935
|
[
"DIRAC"
] |
a077a45e0f91428a10d39e39374321f902c34493a4dac2c69f8f2762cdd7f3dd
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Development script to test the algorithms of a given model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "[email protected]"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from math import factorial
import numpy as np
import itertools
from random import shuffle
import time
if __name__ == "__main__":
allcg = AllCoordinationGeometries()
while True:
cg_symbol = input("Enter symbol of the geometry for which you want to get the explicit permutations : ")
try:
cg = allcg[cg_symbol]
break
except LookupError:
print("Wrong geometry, try again ...")
continue
lgf = LocalGeometryFinder()
lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
    myindices = list(range(cg.coordination_number))  # list, so it can be shuffled in place below
test = input(
'Enter if you want to test all possible permutations ("all" or "a") or a given number of random permutations (i.e. "25")'
)
if test == "all" or test == "a":
perms_iterator = itertools.permutations(myindices)
nperms = factorial(cg.coordination_number)
else:
try:
nperms = int(test)
except Exception:
raise ValueError(f"Could not turn {test} into integer ...")
perms_iterator = []
for ii in range(nperms):
shuffle(myindices)
perms_iterator.append(list(myindices))
iperm = 1
    t1 = time.perf_counter()
for indices_perm in perms_iterator:
lgf.setup_test_perfect_environment(cg_symbol, indices=indices_perm)
lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()
print(f"Perm # {iperm:d}/{nperms:d} : ", indices_perm)
algos_results = []
for algo in cg.algorithms:
print(algo)
if algo.algorithm_type == "EXPLICIT_PERMUTATIONS":
raise ValueError("Do something for the explicit ones ... (these should anyway be by far ok!)")
results = lgf.coordination_geometry_symmetry_measures_separation_plane(
coordination_geometry=cg,
separation_plane_algo=algo,
tested_permutations=False,
points_perfect=points_perfect,
)
print("Number of permutations tested : ", len(results[0]))
algos_results.append(min(results[0]))
if not np.isclose(min(results[0]), 0.0):
print("Following is not 0.0 ...")
input(results)
print(" => ", algos_results)
iperm += 1
    t2 = time.perf_counter()
print(
'Time to test {:d} permutations for geometry "{}" (symbol "{}") : {:.2f} seconds'.format(
nperms, cg.name, cg_symbol, t2 - t1
)
)
|
vorwerkc/pymatgen
|
dev_scripts/chemenv/test_algos.py
|
Python
|
mit
| 3,355
|
[
"pymatgen"
] |
02023b34d0fa52490426841f56e3c138cfe3a23fb853632945f27367ab3dbd20
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 15 10:10:15 2016
@author: Steven_Pellizzeri (spelliz[@]clemson.edu)
"""
import sys
from ase.atoms import string2symbols
from ase.thermochemistry import HarmonicThermo,IdealGasThermo
from ase.io import write, read
from glob import glob
## user input
shape = str(sys.argv[1]) ## structure shape
symnum = int(sys.argv[2]) ## symmetry number
spin = int(sys.argv[3]) ## spin number
temp = float(sys.argv[4]) ## temperature in K
pres = float(sys.argv[5]) ## pressure in Pa
RMFREQ_KbT = str(sys.argv[6]) ## Remove frequencies below KbT cutoff
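## example invocation (all values below are illustrative placeholders):
##   python ase-thermo-gaussian-arg.py nonlinear 1 0 298.15 101325.0 True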
## conversion factors
HarttoeV = float(27.2114)
CmtoeV = float(1.239842E-4)
## get log file name
file_list = glob('*.log')
log_file = file_list[0]
## get SCF energy from the log file
Energy = []
infile = open(log_file)
for line in infile:
if "SCF Done:" in line:
Energy.append(line)
infile.close()
final_energy = Energy[-1]
scf_energy_hartree = float(final_energy.split(" ")[7])
scf_energy_eV = scf_energy_hartree * HarttoeV
## get the frequencies from the frequencies.dat file"
Freq = []
freq_file = open("frequencies.dat")
raw_freq = freq_file.read().replace('[', ' ').replace(']', ' ').split()[-1]
for i in raw_freq.split(','):
Freq.append(float(i)*CmtoeV)
Freq = [f for f in Freq if f > 0]  ## drop non-positive modes (removing items while iterating skips entries)
freq_kbt_cutoff = float(8.6173324E-5 * temp)
freq_remove = []
if RMFREQ_KbT == "True":
for i in Freq:
if i <= freq_kbt_cutoff:
freq_remove.append(i)
for i in freq_remove:
if i in Freq:
Freq.remove(i)
## get the structure from the output file
struc = read(log_file,format='gaussian-out')
## get the ideal gas limit thermodynamic values
thermo = IdealGasThermo(vib_energies=Freq, potentialenergy=scf_energy_eV,
atoms=struc, geometry=shape,
symmetrynumber=symnum, spin=spin)
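## (ASE's ideal-gas Gibbs energy is computed as H - T*S, so the values printed
##  below should satisfy G = H - temp*S up to rounding)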
print "Ideal Gas Limit"
ZPE = thermo.get_ZPE_correction()
H = thermo.get_enthalpy(temperature=temp)
S = thermo.get_entropy(temperature=temp,pressure=pres)
G = thermo.get_gibbs_energy(temperature=temp,pressure=pres)
print " "
print "ZPE correction (ZPE) = ", ZPE, " eV"
print "Ethalpy (H) = ", H, " eV"
print "Entropy (S) = ", S, " eV/K"
print "Gibbs Energy (G) = ", G, " eV"
## get the harmonic limit thermodynamic values
thermo = HarmonicThermo(vib_energies=Freq, potentialenergy=scf_energy_eV)
print
print "Harmonic Approximation"
ZPE = thermo.get_ZPE_correction()
U = thermo.get_internal_energy(temperature=temp)
S = thermo.get_entropy(temperature=temp)
F = thermo.get_helmholtz_energy(temperature=temp)
print " "
print "ZPE correction (ZPE) = ", ZPE , " eV"
print "Internal energy (U) = ", U, " eV"
print "Entropy (S) = ", S, " eV/K"
print "Helmholtz Energy (F) = ", F, " eV"
|
spelliz/Various-Bash-Commands
|
ase-thermo-gaussian-arg.py
|
Python
|
gpl-3.0
| 2,845
|
[
"ASE",
"Gaussian"
] |
087b62c571277c0d5781726eaf670d9e58088ebf39a808c366630a2e92d0f79f
|
###############################################################################
# AST visualizer - generates a DOT file for Graphviz. #
# #
# To generate an image from the DOT file run $ dot -Tpng -o ast.png ast.dot #
# #
###############################################################################
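# Example usage (the Pascal file name is a placeholder):
#   $ python genastdot.py part10.pas > ast.dot
#   $ dot -Tpng -o ast.png ast.dot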
import argparse
import textwrap
from interpreter.interpreter import Interpreter
from interpreter.lexer import Lexer
from interpreter.parser import Parser
class ASTVisualizer(Interpreter):
def __init__(self, parser):
self.parser = parser
self.ncount = 1
self.dot_header = [textwrap.dedent("""\
digraph astgraph {
node [shape=circle, fontsize=12, fontname="Courier", height=.1];
ranksep=.3;
edge [arrowsize=.5]
""")]
self.dot_body = []
self.dot_footer = ['}']
def visit_Program(self, node):
s = ' node{} [label="Program"]\n'.format(self.ncount)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
self.visit(node.block)
s = ' node{} -> node{}\n'.format(node._num, node.block._num)
self.dot_body.append(s)
def visit_Block(self, node):
s = ' node{} [label="Block"]\n'.format(self.ncount)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
for declaration in node.declarations:
self.visit(declaration)
self.visit(node.compound_statement)
for decl_node in node.declarations:
s = ' node{} -> node{}\n'.format(node._num, decl_node._num)
self.dot_body.append(s)
s = ' node{} -> node{}\n'.format(
node._num,
node.compound_statement._num
)
self.dot_body.append(s)
def visit_VarDecl(self, node):
s = ' node{} [label="VarDecl"]\n'.format(self.ncount)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
self.visit(node.var_node)
s = ' node{} -> node{}\n'.format(node._num, node.var_node._num)
self.dot_body.append(s)
self.visit(node.type_node)
s = ' node{} -> node{}\n'.format(node._num, node.type_node._num)
self.dot_body.append(s)
def visit_Type(self, node):
s = ' node{} [label="{}"]\n'.format(self.ncount, node.token.value)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
def visit_Num(self, node):
s = ' node{} [label="{}"]\n'.format(self.ncount, node.token.value)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
def visit_BinOp(self, node):
s = ' node{} [label="{}"]\n'.format(self.ncount, node.op.value)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
self.visit(node.left)
self.visit(node.right)
for child_node in (node.left, node.right):
s = ' node{} -> node{}\n'.format(node._num, child_node._num)
self.dot_body.append(s)
def visit_UnaryOp(self, node):
s = ' node{} [label="unary {}"]\n'.format(self.ncount, node.op.value)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
self.visit(node.expr)
s = ' node{} -> node{}\n'.format(node._num, node.expr._num)
self.dot_body.append(s)
def visit_Compound(self, node):
s = ' node{} [label="Compound"]\n'.format(self.ncount)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
for child in node.children:
self.visit(child)
s = ' node{} -> node{}\n'.format(node._num, child._num)
self.dot_body.append(s)
def visit_Assign(self, node):
s = ' node{} [label="{}"]\n'.format(self.ncount, node.op.value)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
self.visit(node.left)
self.visit(node.right)
for child_node in (node.left, node.right):
s = ' node{} -> node{}\n'.format(node._num, child_node._num)
self.dot_body.append(s)
def visit_Var(self, node):
s = ' node{} [label="{}"]\n'.format(self.ncount, node.value)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
def visit_NoOp(self, node):
s = ' node{} [label="NoOp"]\n'.format(self.ncount)
self.dot_body.append(s)
node._num = self.ncount
self.ncount += 1
def gendot(self):
tree = self.parser.parse()
self.visit(tree)
return ''.join(self.dot_header + self.dot_body + self.dot_footer)
def main():
argparser = argparse.ArgumentParser(
description='Generate an AST DOT file.'
)
argparser.add_argument(
'fname',
help='Pascal source file'
)
args = argparser.parse_args()
fname = args.fname
text = open(fname, 'r').read()
lexer = Lexer(text)
parser = Parser(lexer)
viz = ASTVisualizer(parser)
content = viz.gendot()
print(content)
if __name__ == '__main__':
main()
|
LucasMagnum/simple-interpreter
|
scripts/genastdot.py
|
Python
|
mit
| 5,360
|
[
"VisIt"
] |
a0ef06d4c91031aacb9d38f2b0ff7333b64b82ad5223840fc4f055fa67901fb3
|
data = (
'ha', # 0x00
'hu', # 0x01
'hi', # 0x02
'haa', # 0x03
'hee', # 0x04
'he', # 0x05
'ho', # 0x06
None, # 0x07
'la', # 0x08
'lu', # 0x09
'li', # 0x0a
'laa', # 0x0b
'lee', # 0x0c
'le', # 0x0d
'lo', # 0x0e
'lwa', # 0x0f
'hha', # 0x10
'hhu', # 0x11
'hhi', # 0x12
'hhaa', # 0x13
'hhee', # 0x14
'hhe', # 0x15
'hho', # 0x16
'hhwa', # 0x17
'ma', # 0x18
'mu', # 0x19
'mi', # 0x1a
'maa', # 0x1b
'mee', # 0x1c
'me', # 0x1d
'mo', # 0x1e
'mwa', # 0x1f
'sza', # 0x20
'szu', # 0x21
'szi', # 0x22
'szaa', # 0x23
'szee', # 0x24
'sze', # 0x25
'szo', # 0x26
'szwa', # 0x27
'ra', # 0x28
'ru', # 0x29
'ri', # 0x2a
'raa', # 0x2b
'ree', # 0x2c
're', # 0x2d
'ro', # 0x2e
'rwa', # 0x2f
'sa', # 0x30
'su', # 0x31
'si', # 0x32
'saa', # 0x33
'see', # 0x34
'se', # 0x35
'so', # 0x36
'swa', # 0x37
'sha', # 0x38
'shu', # 0x39
'shi', # 0x3a
'shaa', # 0x3b
'shee', # 0x3c
'she', # 0x3d
'sho', # 0x3e
'shwa', # 0x3f
'qa', # 0x40
'qu', # 0x41
'qi', # 0x42
'qaa', # 0x43
'qee', # 0x44
'qe', # 0x45
'qo', # 0x46
None, # 0x47
'qwa', # 0x48
None, # 0x49
'qwi', # 0x4a
'qwaa', # 0x4b
'qwee', # 0x4c
'qwe', # 0x4d
None, # 0x4e
None, # 0x4f
'qha', # 0x50
'qhu', # 0x51
'qhi', # 0x52
'qhaa', # 0x53
'qhee', # 0x54
'qhe', # 0x55
'qho', # 0x56
None, # 0x57
'qhwa', # 0x58
None, # 0x59
'qhwi', # 0x5a
'qhwaa', # 0x5b
'qhwee', # 0x5c
'qhwe', # 0x5d
None, # 0x5e
None, # 0x5f
'ba', # 0x60
'bu', # 0x61
'bi', # 0x62
'baa', # 0x63
'bee', # 0x64
'be', # 0x65
'bo', # 0x66
'bwa', # 0x67
'va', # 0x68
'vu', # 0x69
'vi', # 0x6a
'vaa', # 0x6b
'vee', # 0x6c
've', # 0x6d
'vo', # 0x6e
'vwa', # 0x6f
'ta', # 0x70
'tu', # 0x71
'ti', # 0x72
'taa', # 0x73
'tee', # 0x74
'te', # 0x75
'to', # 0x76
'twa', # 0x77
'ca', # 0x78
'cu', # 0x79
'ci', # 0x7a
'caa', # 0x7b
'cee', # 0x7c
'ce', # 0x7d
'co', # 0x7e
'cwa', # 0x7f
'xa', # 0x80
'xu', # 0x81
'xi', # 0x82
'xaa', # 0x83
'xee', # 0x84
'xe', # 0x85
'xo', # 0x86
None, # 0x87
'xwa', # 0x88
None, # 0x89
'xwi', # 0x8a
'xwaa', # 0x8b
'xwee', # 0x8c
'xwe', # 0x8d
None, # 0x8e
None, # 0x8f
'na', # 0x90
'nu', # 0x91
'ni', # 0x92
'naa', # 0x93
'nee', # 0x94
'ne', # 0x95
'no', # 0x96
'nwa', # 0x97
'nya', # 0x98
'nyu', # 0x99
'nyi', # 0x9a
'nyaa', # 0x9b
'nyee', # 0x9c
'nye', # 0x9d
'nyo', # 0x9e
'nywa', # 0x9f
'\'a', # 0xa0
'\'u', # 0xa1
None, # 0xa2
'\'aa', # 0xa3
'\'ee', # 0xa4
'\'e', # 0xa5
'\'o', # 0xa6
'\'wa', # 0xa7
'ka', # 0xa8
'ku', # 0xa9
'ki', # 0xaa
'kaa', # 0xab
'kee', # 0xac
'ke', # 0xad
'ko', # 0xae
None, # 0xaf
'kwa', # 0xb0
None, # 0xb1
'kwi', # 0xb2
'kwaa', # 0xb3
'kwee', # 0xb4
'kwe', # 0xb5
None, # 0xb6
None, # 0xb7
'kxa', # 0xb8
'kxu', # 0xb9
'kxi', # 0xba
'kxaa', # 0xbb
'kxee', # 0xbc
'kxe', # 0xbd
'kxo', # 0xbe
None, # 0xbf
'kxwa', # 0xc0
None, # 0xc1
'kxwi', # 0xc2
'kxwaa', # 0xc3
'kxwee', # 0xc4
'kxwe', # 0xc5
None, # 0xc6
None, # 0xc7
'wa', # 0xc8
'wu', # 0xc9
'wi', # 0xca
'waa', # 0xcb
'wee', # 0xcc
'we', # 0xcd
'wo', # 0xce
None, # 0xcf
'`a', # 0xd0
'`u', # 0xd1
'`i', # 0xd2
'`aa', # 0xd3
'`ee', # 0xd4
'`e', # 0xd5
'`o', # 0xd6
None, # 0xd7
'za', # 0xd8
'zu', # 0xd9
'zi', # 0xda
'zaa', # 0xdb
'zee', # 0xdc
'ze', # 0xdd
'zo', # 0xde
'zwa', # 0xdf
'zha', # 0xe0
'zhu', # 0xe1
'zhi', # 0xe2
'zhaa', # 0xe3
'zhee', # 0xe4
'zhe', # 0xe5
'zho', # 0xe6
'zhwa', # 0xe7
'ya', # 0xe8
'yu', # 0xe9
'yi', # 0xea
'yaa', # 0xeb
'yee', # 0xec
'ye', # 0xed
'yo', # 0xee
None, # 0xef
'da', # 0xf0
'du', # 0xf1
'di', # 0xf2
'daa', # 0xf3
'dee', # 0xf4
'de', # 0xf5
'do', # 0xf6
'dwa', # 0xf7
'dda', # 0xf8
'ddu', # 0xf9
'ddi', # 0xfa
'ddaa', # 0xfb
'ddee', # 0xfc
'dde', # 0xfd
'ddo', # 0xfe
'ddwa', # 0xff
)
|
avian2/unidecode
|
unidecode/x012.py
|
Python
|
gpl-2.0
| 4,293
|
[
"BWA"
] |
5e889116f36d1d5dbd43be8a7293c14ab0b8b0b77a7da4f3e2d29910c210e393
|
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.messageboxext import MessageBoxExt
def openloadApi(self, data):
if re.search('IP address not authorized', data):
message = self.session.open(MessageBoxExt, _("IP address not authorized. Visit https://openload.co/pair"), MessageBoxExt.TYPE_ERROR)
else:
stream_url = re.findall('"url":"(.*?)"', data)
if stream_url:
self._callback(stream_url[0].replace('\\',''))
else:
self.stream_not_found()
|
schleichdi2/OpenNfr_E2_Gui-6.0
|
lib/python/Plugins/Extensions/MediaPortal/resources/hosters/openload.py
|
Python
|
gpl-2.0
| 595
|
[
"VisIt"
] |
0f65000189d88f24e45d696e46b54c8fc2bcc84b1ac55b11972d0794e934e1bf
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 5 15:15:09 2014
@author: drew
"""
from __future__ import division
from matplotlib import use
use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, _cm
from matplotlib import patches
import numpy as np
import sunpy
from sunpy.map import Map, GenericMap
from sunpy.instr.aia import aiaprep
from sunpy.net import vso
from scipy.io.idl import readsav as read
from sys import argv
from os import path, system, makedirs
import datetime as dt
from sunpy.time.timerange import TimeRange as tr
import glob
from itertools import product
from mpi4py import MPI
from utils import gaussian, load_temp_responses
from astropy.units import Unit
try:
from fits import calc_fits
print 'Fortran extension imported successfully'
except ImportError:
print 'Current extension is broken, missing or incompatible.\n'\
+'Compiling Fortran extension.'
system(path.expanduser('f2py -c -m fits ~/CoronaTemps/fitsmodule.f90'))
from fits import calc_fits
home = path.expanduser('~')
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
args = []
for a in argv[1:]:
for f in [eval, sunpy.time.parse_time]:
try:
a = f(a)
break
except:
continue
args.append(a)
date, n_params, data_dir, datfile, submap, verbose, force_temp_scan = args
wlens = ['094', '131', '171', '193', '211', '335']
t0 = 5.6
thiswlen = None
if rank == 0:
if datfile:
images = {}
f = open(datfile)
# Loop through wavelengths
for line in f:
if line[:3] in wlens:
allwlenmaps = []
thiswlen = line[:3]
print 'Loading {} files'.format(thiswlen)
elif 'fits' in line:
thismap = aiaprep(Map(line[:-1]))
thismap.data /= thismap.exposure_time
allwlenmaps.append(thismap)
elif line.strip() in ['', '\n']:
if thiswlen:
wlenmap = allwlenmaps[-1]
for thismap in allwlenmaps[:-1]:
wlenmap.data += thismap.data
wlenmap.data /= len(allwlenmaps)
images[thiswlen] = wlenmap
images = [images[w] for w in wlens]
else:
images = []
#imagefiles = []
for wl, wlen in enumerate(wlens):
if date == 'model':
fits_dir = path.join(data_dir, 'synthetic', wlen)
images.append(Map(path.join(fits_dir, 'model.fits')))
continue
else:
fits_dir = path.join(data_dir, '{:%Y/*/*}/{}'.format(date, wlen))
if verbose: print 'Searching {} for AIA data'.format(fits_dir)
timerange = tr(date - dt.timedelta(seconds=5),
date + dt.timedelta(seconds=11))
ntimes = int(timerange.seconds())
times = [time.start() for time in timerange.split(ntimes)]
for time in times:
filename = path.join(fits_dir,
#'aia*{0:%Y?%m?%d}?{0:%H?%M?%S}*lev1?fits'.format(time))
'AIA{0:%Y%m%d_%H%M_*.fits}'.format(time))
if verbose: print filename
filelist = glob.glob(filename)
if verbose: print filelist
if filelist != []:
if verbose: print 'File found: ', filelist[0]
#imagefiles.append(filelist[0])
temp_im = aiaprep(Map(filelist[0]))
if submap:
temp_im = temp_im.submap(*submap)
temp_im.data /= temp_im.exposure_time # Can probably increase speed a bit by making this * (1.0/exp_time)
images.append(temp_im)
break
else:
pass
if len(images) < wl+1:
if verbose: print 'No data found for {}. Downloading...'.format(wlen)
client = vso.VSOClient()
qr = client.query(vso.attrs.Time(timerange.start(), timerange.end()),
vso.attrs.Wave(wlen, wlen),
vso.attrs.Instrument('aia'),
vso.attrs.Provider('JSOC'))
dwpath = path.join(fits_dir.replace('*/*', '{:%m/%d}'.format(date)),
'{file}')
res = client.get(qr, path=dwpath, site='NSO').wait()
temp_im = aiaprep(Map(res))
if submap:
temp_im = temp_im.submap(*submap)
temp_im.data /= temp_im.exposure_time # Can probably increase speed a bit by making this * (1.0/exp_time)
images.append(temp_im)
# Normalise images to 171A if only using one parameter
if n_params == 1:
normim = images[2].data.copy()
if verbose: print 'Normalising images'
for i in range(len(wlens)):
images[i].data /= normim
header = images[2].meta.copy()
images = np.array([im.data for im in images])
# Scatter image data to each process
if rank == 0:
#[images[..., (p/size)*images.shape[2]:((p+1)/size)*images.shape[2]] \
# for p in range(size)]
temp = []
for p in range(size):
        mini = int((p/size)*images.shape[2])
        maxi = int(((p+1)/size)*images.shape[2])
temp.append(images[..., mini:maxi])
if verbose: print p, mini, maxi, images[..., mini:maxi].shape
images = temp
if verbose: print len(images), images[0].shape
else:
images = None
images = comm.scatter(images, root=0)
# Get dimensions of image
x, y = images[0].shape
if verbose:
print 'Image size, rank {}:'.format(rank), x, y
print 'Image maxes, rank {}:'.format(rank), [im.max() for im in images]
n_wlens = images.shape[0]
temp = np.arange(t0, 7.01, 0.01)
if n_params == 1:
# Assume a width of the gaussian DEM distribution and normalise the height
widths = [0.1]
heights = [1.0]
else:
widths = np.arange(0.1, 0.8, 0.1)
heights = 10.0 ** np.arange(20, 35.1, 0.1)
# TODO: check if either of the above are sensible ranges of numbers
parvals = np.array([i for i in product(temp, widths, heights)])
n_vals = len(temp) * len(widths) * len(heights)
if verbose: print len(temp), len(widths), len(heights), n_vals, n_vals*6
if rank == 0:
try:
if force_temp_scan:
raise IOError
model = np.memmap(filename='synth_emiss_{}pars'.format(n_params),
dtype='float32', mode='r', shape=(n_vals, n_wlens))
except IOError:
if verbose: print 'No synthetic emission data found. Re-scanning temperature range.'
resp = load_temp_responses()
if n_params == 1:
resp /= resp[2, :]
resp[np.isnan(resp)] = 0
if verbose:
print resp.min(axis=1), np.nanmin(resp, axis=1)
print resp.max(axis=1), np.nanmax(resp, axis=1)
logt = np.arange(0, 15.05, 0.05)
delta_t = logt[1] - logt[0]
model = np.memmap(filename='synth_emiss_{}pars'.format(n_params),
dtype='float32', mode='w+', shape=(n_vals, n_wlens))
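# For every candidate DEM, synthesise the expected emission in each channel by
# integrating the Gaussian DEM against the channel's temperature response over
# log T (rectangle rule with step delta_t).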
for p, params in enumerate(parvals):
dem = gaussian(logt, *params)
f = resp * dem
model[p, :] = np.sum(f, axis=1) * delta_t
if verbose:
print model.max(axis=0)
print model[np.isnan(model)].size
if n_params == 1:
normmod = model[:, 2].reshape((n_vals, 1))
model /= normmod
model.flush()
if verbose: print model.max(axis=0)
else:
model = None
model = comm.bcast(model, root=0)
if verbose:
if rank == 0: print 'Calculating temperature values...'
print rank, images.shape, model.shape, parvals.shape, n_vals, n_wlens, x, y, n_params
print [im.max() for im in images]
print model.max(axis=0)
if n_params == 1:
parvals = parvals[:, 0]
temps = calc_fits(images, model, parvals, n_vals, n_wlens, x, y, n_params)
# Convert EM values to log scale if there are any
if temps.shape[2] > 2: temps[..., 2] = np.log10(temps[..., 2])
if verbose: print 'Done.'
# Get data all back in one place and save it
temps = comm.gather(temps, root=0)
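# Rank 0 stitches the per-rank temperature slices back together along the second spatial axis before saving.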
if rank == 0:
if verbose: print len(temps), temps[0].shape
temp = np.zeros(shape=(x, y*size, n_params+1))
for p in range(size):
mini = (p/size)*temp.shape[1]
maxi = ((p+1)/size)*temp.shape[1]
temp[:, mini:maxi, :] = temps[p]
if verbose: print p, mini, maxi, temp[:, mini:maxi, :].shape
temps = temp
if verbose: print 'End ct', temps.shape, temps[..., 0].mean(), temps[..., 1].mean()
tempmap = GenericMap(temps, header)
tempmap.save(path.expanduser('~/CoronaTemps/temporary.fits'))
|
drewleonard42/CoronaTemps
|
create_tempmap.py
|
Python
|
bsd-2-clause
| 8,857
|
[
"Gaussian"
] |
3e083eb8ad40c18ce1c595b6f6583ac9ac4533a1fe88138696dabe1b81ae4a85
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Measuring the force on a single sphere immersed in a fluid with
# fixed velocity boundary conditions created by two
# walls at finite distance.
# The force is compared to the analytical result F = 6 pi eta r v,
# i.e. the Stokes force on the particle.
# We create a box of size box_width x box_width x box_length and
# place an object in the center. We measure the drag force
# in z direction. We create walls in the xz and yz plane at the box
# boundaries, where the velocity is fixed to v.
#
import espressomd
from espressomd import lb, lbboundaries, shapes, has_features
import unittest as ut
import unittest_decorators as utx
import numpy as np
# Define the LB Parameters
TIME_STEP = 0.4
AGRID = 0.6
KVISC = 6
DENS = 2.3
LB_PARAMS = {'agrid': AGRID,
'dens': DENS,
'visc': KVISC,
'tau': TIME_STEP}
# System setup
radius = 8 * AGRID
box_width = 62 * AGRID
real_width = box_width + 2 * AGRID
box_length = 62 * AGRID
c_s = np.sqrt(1. / 3. * AGRID**2 / TIME_STEP**2)
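# c_s = AGRID / (sqrt(3) * TIME_STEP) is the lattice speed of sound; the slip
# velocity below is set to 20% of it to keep the flow comfortably subsonic.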
v = [0, 0, 0.2 * c_s] # The boundary slip
class Stokes:
lbf = None
system = espressomd.System(box_l=[real_width, real_width, box_length])
system.box_l = [real_width, real_width, box_length]
system.time_step = TIME_STEP
system.cell_system.skin = 0.01
def test_stokes(self):
self.system.actors.clear()
self.system.lbboundaries.clear()
self.system.actors.add(self.lbf)
self.system.thermostat.set_lb(LB_fluid=self.lbf, gamma=1.0)
# Setup walls
walls = [None] * 4
walls[0] = lbboundaries.LBBoundary(shape=shapes.Wall(
normal=[-1, 0, 0], dist=-(1 + box_width)), velocity=v)
walls[1] = lbboundaries.LBBoundary(shape=shapes.Wall(
normal=[1, 0, 0], dist=1), velocity=v)
walls[2] = lbboundaries.LBBoundary(shape=shapes.Wall(
normal=[0, -1, 0], dist=-(1 + box_width)), velocity=v)
walls[3] = lbboundaries.LBBoundary(shape=shapes.Wall(
normal=[0, 1, 0], dist=1), velocity=v)
for wall in walls:
self.system.lbboundaries.add(wall)
# setup sphere without slip in the middle
sphere = lbboundaries.LBBoundary(shape=shapes.Sphere(
radius=radius, center=[real_width / 2] * 2 + [box_length / 2],
direction=1))
self.system.lbboundaries.add(sphere)
def size(vector):
tmp = 0
for k in vector:
tmp += k * k
return np.sqrt(tmp)
last_force = -1000.
stokes_force = 6 * np.pi * KVISC * radius * size(v)
self.system.integrator.run(35)
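# Integrate in short bursts until the measured drag changes by less than 1% of
# the analytical Stokes force between iterations, i.e. until the flow around
# the sphere is approximately stationary.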
while True:
self.system.integrator.run(10)
force = np.linalg.norm(sphere.get_force())
if np.abs(last_force - force) < 0.01 * stokes_force:
break
last_force = force
force = np.copy(sphere.get_force())
np.testing.assert_allclose(
force,
[0, 0, stokes_force],
rtol=0.03,
atol=stokes_force * 0.03)
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(['LB_BOUNDARIES_GPU', 'EXTERNAL_FORCES'])
class LBGPUStokes(ut.TestCase, Stokes):
def setUp(self):
self.lbf = espressomd.lb.LBFluidGPU(**LB_PARAMS)
@utx.skipIfMissingFeatures(['LB_BOUNDARIES', 'EXTERNAL_FORCES'])
class LBCPUStokes(ut.TestCase, Stokes):
def setUp(self):
self.lbf = espressomd.lb.LBFluid(**LB_PARAMS)
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/lb_stokes_sphere.py
|
Python
|
gpl-3.0
| 4,213
|
[
"ESPResSo"
] |
5607359f517c26aa2d8db11b2c135f73691d56f27785af03b4e15d321d3c9c2a
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import numpy as np
import matplotlib.pyplot as plt
from lib.aifh.util import *
import types
from sklearn import svm, datasets
import sklearn
import scipy.stats
import numpy as np
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import sigmoid
from lasagne.nonlinearities import softmax
from lasagne.nonlinearities import rectify
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
# Compute z-scores using a supplied mean and standard deviation rather than those of the provided matrix.
def zscore(x,mean,sdev):
return (x-mean)/sdev
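# Flatten every layer's weights from the nolearn/lasagne network into a single 1-D vector (used below to report the number of parameters).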
def extract_weights(net):
result = None
weights = net.get_all_params_values()
for key in weights:
for a in weights[key]:
for b in a:
if result is None:
result = b
else:
result = np.hstack( [result,b] )
return result
# Define the structure of the neural network
layers0 = [('input', InputLayer),
('dense0', DenseLayer),
('output', DenseLayer)]
net0 = NeuralNet(layers=layers0,
input_shape=(None, 4),
dense0_num_units=50,
dense0_nonlinearity = rectify,
output_num_units=3,
output_nonlinearity=softmax,
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=False,
eval_size=0.0,
verbose=1,
max_epochs=100000,
on_epoch_finished=[
EarlyStopping(patience=20)
]
)
# Get the iris dataset from scikit-learn
iris = datasets.load_iris()
# Split the iris dataset into 25% validation, and 75% train. Also shuffle with a seed of 42.
X_train, X_validate, y_train, y_validate = sklearn.cross_validation.train_test_split(
iris.data,iris.target, test_size = 0.25, random_state = 42)
# Calculate the mean and standard deviation vectors (all 4 measurements) for training data.
train_mean = np.mean(X_train, axis=0)
train_sdev = np.std(X_train, axis=0)
# Compute the z-scores for both the training and validation sets, using the training mean and standard
# deviation for both. This is customary because the model is fit on data scaled this way. Additionally, the
# prediction set might be too small to calculate a meaningful mean and standard deviation of its own.
X_train_z = zscore(X_train, train_mean, train_sdev) #scipy.stats.mstats.zscore(X_train)
X_validate_z = zscore(X_validate, train_mean, train_sdev) #scipy.stats.mstats.zscore(X_validate)
#These can be used to check my zscore calc to numpy
#print(X_train_z)
#print(scipy.stats.mstats.zscore(X_train))
# Provide our own validation set
def my_split(self, X, y, eval_size):
return X_train_z,X_validate_z,y_train,y_validate
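# Binding my_split to the network overrides nolearn's internal train/test split so that the
# pre-computed, z-scored train and validation sets are used instead.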
net0.train_test_split = types.MethodType(my_split, net0)
# Train the network
net0.initialize()
d = extract_weights(net0)
print("D:" + str(len(d)))
#net0.fit(X_train_z,y_train)
# Predict the validation set
pred_y = net0.predict(X_validate_z)
# Display predictions and count the number of incorrect predictions.
species_names = ['setosa','versicolour','virginica']
count = 0
wrong = 0
for element in zip(X_validate,y_validate,pred_y):
print("Input: sepal length: {}, sepal width: {}, petal length: {}, petal width: {}; Expected: {}; Actual: {}".format(
element[0][0],element[0][1],element[0][2],element[0][3],
species_names[element[1]],
species_names[element[2]]))
if element[1] != element[2]:
wrong = wrong + 1
count = count + 1
print("Incorrect {}/{} ({}%)".format(wrong,count,(wrong/count)*100))
|
PeterLauris/aifh
|
vol3/vol3-python-examples/examples/example_iris_anneal.py
|
Python
|
apache-2.0
| 4,527
|
[
"VisIt"
] |
1fafdce754f02432e6422c80bc691ee60877afbac1b5e35fc33cc81ee9812910
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import os.path
import warnings
import inspect
from contextlib import contextmanager
from datetime import datetime
import logging
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
import traits.api as t
import numbers
from hyperspy.axes import AxesManager
from hyperspy import io
from hyperspy.drawing import mpl_hie, mpl_hse, mpl_he
from hyperspy.learn.mva import MVA, LearningResults
import hyperspy.misc.utils
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.drawing import signal as sigdraw
from hyperspy.defaults_parser import preferences
from hyperspy.misc.io.tools import ensure_directory
from hyperspy.misc.utils import iterable_not_string
from hyperspy.external.progressbar import progressbar
from hyperspy.exceptions import SignalDimensionError, DataDimensionError
from hyperspy.misc import rgb_tools
from hyperspy.misc.utils import underline, isiterable
from hyperspy.external.astroML.histtools import histogram
from hyperspy.drawing.utils import animate_legend
from hyperspy.drawing.marker import markers_metadata_dict_to_markers
from hyperspy.misc.slicing import SpecialSlicers, FancySlicing
from hyperspy.misc.utils import slugify
from hyperspy.docstrings.signal import (
ONE_AXIS_PARAMETER, MANY_AXIS_PARAMETER, OUT_ARG, NAN_FUNC)
from hyperspy.docstrings.plot import BASE_PLOT_DOCSTRING, KWARGS_DOCSTRING
from hyperspy.events import Events, Event
from hyperspy.interactive import interactive
from hyperspy.misc.signal_tools import (are_signals_aligned,
broadcast_signals)
from hyperspy.exceptions import VisibleDeprecationWarning
_logger = logging.getLogger(__name__)
class ModelManager(object):
"""Container for models
"""
class ModelStub(object):
def __init__(self, mm, name):
self._name = name
self._mm = mm
self.restore = lambda: mm.restore(self._name)
self.remove = lambda: mm.remove(self._name)
self.pop = lambda: mm.pop(self._name)
self.restore.__doc__ = "Returns the stored model"
self.remove.__doc__ = "Removes the stored model"
self.pop.__doc__ = \
"Returns the stored model and removes it from storage"
def __repr__(self):
return repr(self._mm._models[self._name])
def __init__(self, signal, dictionary=None):
self._signal = signal
self._models = DictionaryTreeBrowser()
self._add_dictionary(dictionary)
def _add_dictionary(self, dictionary=None):
if dictionary is not None:
for k, v in dictionary.items():
if k.startswith('_') or k in ['restore', 'remove']:
raise KeyError("Can't add dictionary with key '%s'" % k)
k = slugify(k, True)
self._models.set_item(k, v)
setattr(self, k, self.ModelStub(self, k))
def _set_nice_description(self, node, names):
ans = {'date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'dimensions': self._signal.axes_manager._get_dimension_str(),
}
node.add_dictionary(ans)
for n in names:
node.add_node('components.' + n)
def _save(self, name, dictionary):
from itertools import product
_abc = 'abcdefghijklmnopqrstuvwxyz'
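# When no name is given, pick the first unused short name: 'a'..'z', then 'aa', 'ab', and so on.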
def get_letter(models):
howmany = len(models)
if not howmany:
return 'a'
order = int(np.log(howmany) / np.log(26)) + 1
letters = [_abc, ] * order
for comb in product(*letters):
guess = "".join(comb)
if guess not in models.keys():
return guess
if name is None:
name = get_letter(self._models)
else:
name = self._check_name(name)
if name in self._models:
self.remove(name)
self._models.add_node(name)
node = self._models.get_item(name)
names = [c['name'] for c in dictionary['components']]
self._set_nice_description(node, names)
node.set_item('_dict', dictionary)
setattr(self, name, self.ModelStub(self, name))
def store(self, model, name=None):
"""If the given model was created from this signal, stores it
Parameters
----------
model : model
the model to store in the signal
name : {string, None}
the name for the model to be stored with
See Also
--------
remove
restore
pop
"""
if model.signal is self._signal:
self._save(name, model.as_dictionary())
else:
raise ValueError("The model is created from a different signal, "
"you should store it there")
def _check_name(self, name, existing=False):
if not isinstance(name, str):
raise KeyError('Name has to be a string')
if name.startswith('_'):
raise KeyError('Name cannot start with "_" symbol')
if '.' in name:
raise KeyError('Name cannot contain dots (".")')
name = slugify(name, True)
if existing:
if name not in self._models:
raise KeyError(
"Model named '%s' is not currently stored" %
name)
return name
def remove(self, name):
"""Removes the given model
Parameters
----------
name : string
the name of the model to remove
See Also
--------
restore
store
pop
"""
name = self._check_name(name, True)
delattr(self, name)
self._models.__delattr__(name)
def pop(self, name):
"""Returns the restored model and removes it from storage
Parameters
----------
name : string
the name of the model to restore and remove
See Also
--------
restore
store
remove
"""
name = self._check_name(name, True)
model = self.restore(name)
self.remove(name)
return model
def restore(self, name):
"""Returns the restored model
Parameters
----------
name : string
the name of the model to restore
See Also
--------
remove
store
pop
"""
name = self._check_name(name, True)
d = self._models.get_item(name + '._dict').as_dictionary()
return self._signal.create_model(dictionary=copy.deepcopy(d))
def __repr__(self):
return repr(self._models)
def __len__(self):
return len(self._models)
def __getitem__(self, name):
name = self._check_name(name, True)
return getattr(self, name)
class MVATools(object):
# TODO: All of the plotting methods here should move to drawing
def _plot_factors_or_pchars(self, factors, comp_ids=None,
calibrate=True, avg_char=False,
same_window=True, comp_label='PC',
img_data=None,
plot_shifts=True, plot_char=4,
cmap=plt.cm.gray, quiver_color='white',
vector_scale=1,
per_row=3, ax=None):
"""Plot components from PCA or ICA, or peak characteristics
Parameters
----------
comp_ids : None, int, or list of ints
if None, returns maps of all components.
if int, returns maps of components with ids from 0 to given
int.
if list of ints, returns maps of components with ids in
given list.
calibrate : bool
if True, plots are calibrated according to the data in the
axes manager.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default True.
comp_label : string
Title of the plot
cmap : a matplotlib colormap
The colormap used for factor images or
any peak characteristic scatter map
overlay.
Parameters only valid for peak characteristics (or pk char factors):
--------------------------------------------------------------------
img_data - 2D numpy array,
The array to overlay peak characteristics onto. If None,
defaults to the average image of your stack.
plot_shifts - bool, default is True
If true, plots a quiver (arrow) plot showing the shifts for each
peak present in the component being plotted.
plot_char - None or int
If int, the id of the characteristic to plot as the colored
scatter plot.
Possible components are:
4: peak height
5: peak orientation
6: peak eccentricity
quiver_color : any color recognized by matplotlib
Determines the color of vectors drawn for
plotting peak shifts.
vector_scale : integer or None
Scales the quiver plot arrows. The vector
is defined as one data unit along the X axis.
If shifts are small, set vector_scale so
that when they are multiplied by vector_scale,
they are on the scale of the image plot.
If None, uses matplotlib's autoscaling.
"""
if same_window is None:
same_window = True
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
n = len(comp_ids)
if same_window:
rows = int(np.ceil(n / float(per_row)))
fig_list = []
if n < per_row:
per_row = n
if same_window and self.axes_manager.signal_dimension == 2:
f = plt.figure(figsize=(4 * per_row, 3 * rows))
else:
f = plt.figure()
for i in range(len(comp_ids)):
if self.axes_manager.signal_dimension == 1:
if same_window:
ax = plt.gca()
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
ax = sigdraw._plot_1D_component(
factors=factors,
idx=comp_ids[i],
axes_manager=self.axes_manager,
ax=ax,
calibrate=calibrate,
comp_label=comp_label,
same_window=same_window)
if same_window:
plt.legend(ncol=factors.shape[1] // 2, loc='best')
elif self.axes_manager.signal_dimension == 2:
if same_window:
ax = f.add_subplot(rows, per_row, i + 1)
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
sigdraw._plot_2D_component(factors=factors,
idx=comp_ids[i],
axes_manager=self.axes_manager,
calibrate=calibrate, ax=ax,
cmap=cmap, comp_label=comp_label)
if not same_window:
fig_list.append(f)
if same_window: # Main title for same window
title = '%s' % comp_label
if self.axes_manager.signal_dimension == 1:
plt.title(title)
else:
plt.suptitle(title)
animate_legend(f)
try:
plt.tight_layout()
except:
pass
if not same_window:
return fig_list
else:
return f
def _plot_loadings(self, loadings, comp_ids, calibrate=True,
same_window=True, comp_label=None,
with_factors=False, factors=None,
cmap=plt.cm.gray, no_nans=False, per_row=3,
axes_decor='all'):
if same_window is None:
same_window = True
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
n = len(comp_ids)
if same_window:
rows = int(np.ceil(n / float(per_row)))
fig_list = []
if n < per_row:
per_row = n
if same_window and self.axes_manager.signal_dimension == 2:
f = plt.figure(figsize=(4 * per_row, 3 * rows))
else:
f = plt.figure()
for i in range(n):
if self.axes_manager.navigation_dimension == 1:
if same_window:
ax = plt.gca()
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
elif self.axes_manager.navigation_dimension == 2:
if same_window:
ax = f.add_subplot(rows, per_row, i + 1)
else:
if i > 0:
f = plt.figure()
plt.title('%s' % comp_label)
ax = f.add_subplot(111)
sigdraw._plot_loading(
loadings, idx=comp_ids[i], axes_manager=self.axes_manager,
no_nans=no_nans, calibrate=calibrate, cmap=cmap,
comp_label=comp_label, ax=ax, same_window=same_window,
axes_decor=axes_decor)
if not same_window:
fig_list.append(f)
if same_window: # Main title for same window
title = '%s' % comp_label
if self.axes_manager.navigation_dimension == 1:
plt.title(title)
else:
plt.suptitle(title)
try:
plt.tight_layout()
except:
pass
if not same_window:
if with_factors:
return fig_list, self._plot_factors_or_pchars(
factors, comp_ids=comp_ids, calibrate=calibrate,
same_window=same_window, comp_label=comp_label,
per_row=per_row)
else:
return fig_list
else:
if self.axes_manager.navigation_dimension == 1:
plt.legend(ncol=loadings.shape[0] // 2, loc='best')
animate_legend(f)
if with_factors:
return f, self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
per_row=per_row)
else:
return f
def _export_factors(self,
factors,
folder=None,
comp_ids=None,
multiple_files=True,
save_figures=False,
save_figures_format='png',
factor_prefix=None,
factor_format=None,
comp_label=None,
cmap=plt.cm.gray,
plot_shifts=True,
plot_char=4,
img_data=None,
same_window=False,
calibrate=True,
quiver_color='white',
vector_scale=1,
no_nans=True, per_row=3):
from hyperspy._signals.signal2d import Signal2D
from hyperspy._signals.signal1d import Signal1D
if multiple_files is None:
multiple_files = True
if factor_format is None:
factor_format = 'hspy'
# Select the desired factors
if comp_ids is None:
comp_ids = range(factors.shape[1])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
mask = np.zeros(factors.shape[1], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
factors = factors[:, mask]
if save_figures is True:
plt.ioff()
fac_plots = self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
same_window=same_window,
comp_label=comp_label,
img_data=img_data,
plot_shifts=plot_shifts,
plot_char=plot_char,
cmap=cmap,
per_row=per_row,
quiver_color=quiver_color,
vector_scale=vector_scale)
for idx in range(len(comp_ids)):
filename = '%s_%02i.%s' % (factor_prefix, comp_ids[idx],
save_figures_format)
if folder is not None:
filename = os.path.join(folder, filename)
ensure_directory(filename)
_args = {'dpi': 600,
'format': save_figures_format}
fac_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.signal_dimension == 2:
# factor images
axes_dicts = []
axes = self.axes_manager.signal_axes[::-1]
shape = (axes[1].size, axes[0].size)
factor_data = np.rollaxis(
factors.reshape((shape[0], shape[1], -1)), 2)
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts.append({'name': 'factor_index',
'scale': 1.,
'offset': 0.,
'size': int(factors.shape[1]),
'units': 'factor',
'index_in_array': 0, })
s = Signal2D(factor_data,
axes=axes_dicts,
metadata={
'General': {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
elif self.axes_manager.signal_dimension == 1:
axes = [self.axes_manager.signal_axes[0].get_axis_dictionary(),
{'name': 'factor_index',
'scale': 1.,
'offset': 0.,
'size': int(factors.shape[1]),
'units': 'factor',
'index_in_array': 0,
}]
axes[0]['index_in_array'] = 1
s = Signal1D(
factors.T, axes=axes, metadata={
"General": {
'title': '%s from %s' %
(factor_prefix, self.metadata.General.title), }})
filename = '%ss.%s' % (factor_prefix, factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.signal_dimension == 1:
axis_dict = self.axes_manager.signal_axes[0].\
get_axis_dictionary()
axis_dict['index_in_array'] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal1D(factors[:, index],
axes=[axis_dict, ],
metadata={
"General": {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (factor_prefix,
dim,
factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
if self.axes_manager.signal_dimension == 2:
axes = self.axes_manager.signal_axes
axes_dicts = [axes[0].get_axis_dictionary(),
axes[1].get_axis_dictionary()]
axes_dicts[0]['index_in_array'] = 0
axes_dicts[1]['index_in_array'] = 1
factor_data = factors.reshape(
self.axes_manager._signal_shape_in_array + [-1, ])
for dim, index in zip(comp_ids, range(len(comp_ids))):
im = Signal2D(factor_data[..., index],
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
factor_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (factor_prefix,
dim,
factor_format)
if folder is not None:
filename = os.path.join(folder, filename)
im.save(filename)
def _export_loadings(self,
loadings,
folder=None,
comp_ids=None,
multiple_files=True,
loading_prefix=None,
loading_format="hspy",
save_figures_format='png',
comp_label=None,
cmap=plt.cm.gray,
save_figures=False,
same_window=False,
calibrate=True,
no_nans=True,
per_row=3):
from hyperspy._signals.signal2d import Signal2D
from hyperspy._signals.signal1d import Signal1D
if multiple_files is None:
multiple_files = True
if loading_format is None:
loading_format = 'hspy'
if comp_ids is None:
comp_ids = range(loadings.shape[0])
elif not hasattr(comp_ids, '__iter__'):
comp_ids = range(comp_ids)
mask = np.zeros(loadings.shape[0], dtype=np.bool)
for idx in comp_ids:
mask[idx] = 1
loadings = loadings[mask]
if save_figures is True:
plt.ioff()
sc_plots = self._plot_loadings(loadings, comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=comp_label,
cmap=cmap, no_nans=no_nans,
per_row=per_row)
for idx in range(len(comp_ids)):
filename = '%s_%02i.%s' % (loading_prefix, comp_ids[idx],
save_figures_format)
if folder is not None:
filename = os.path.join(folder, filename)
ensure_directory(filename)
_args = {'dpi': 600,
'format': save_figures_format}
sc_plots[idx].savefig(filename, **_args)
plt.ion()
elif multiple_files is False:
if self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[1].size, axes[0].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]['index_in_array'] = 1
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]['index_in_array'] = 2
axes_dicts.append({'name': 'loading_index',
'scale': 1.,
'offset': 0.,
'size': int(loadings.shape[0]),
'units': 'factor',
'index_in_array': 0, })
s = Signal2D(loading_data,
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
elif self.axes_manager.navigation_dimension == 1:
cal_axis = self.axes_manager.navigation_axes[0].\
get_axis_dictionary()
cal_axis['index_in_array'] = 1
axes = [{'name': 'loading_index',
'scale': 1.,
'offset': 0.,
'size': int(loadings.shape[0]),
'units': 'comp_id',
'index_in_array': 0, },
cal_axis]
s = Signal2D(loadings,
axes=axes,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
filename = '%ss.%s' % (loading_prefix, loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
else: # Separate files
if self.axes_manager.navigation_dimension == 1:
axis_dict = self.axes_manager.navigation_axes[0].\
get_axis_dictionary()
axis_dict['index_in_array'] = 0
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal1D(loadings[index],
axes=[axis_dict, ])
filename = '%s-%i.%s' % (loading_prefix,
dim,
loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
elif self.axes_manager.navigation_dimension == 2:
axes_dicts = []
axes = self.axes_manager.navigation_axes[::-1]
shape = (axes[0].size, axes[1].size)
loading_data = loadings.reshape((-1, shape[0], shape[1]))
axes_dicts.append(axes[0].get_axis_dictionary())
axes_dicts[0]['index_in_array'] = 0
axes_dicts.append(axes[1].get_axis_dictionary())
axes_dicts[1]['index_in_array'] = 1
for dim, index in zip(comp_ids, range(len(comp_ids))):
s = Signal2D(loading_data[index, ...],
axes=axes_dicts,
metadata={
"General": {'title': '%s from %s' % (
loading_prefix,
self.metadata.General.title),
}})
filename = '%s-%i.%s' % (loading_prefix,
dim,
loading_format)
if folder is not None:
filename = os.path.join(folder, filename)
s.save(filename)
def plot_decomposition_factors(self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
cmap=plt.cm.gray,
per_row=3,
title=None):
"""Plot factors from a decomposition. In case of 1D signal axis, each
factors line can be toggled on and off by clicking on their
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list of ints
if None (default), returns maps of all components if the output_dimension was defined when
executing ``decomposition``. Otherwise it raises a ValueError.
if int, returns maps of components with ids from 0 to
given int.
if list of ints, returns maps of components with ids in
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default is True.
title : string
Title of the plot.
cmap : The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int, the number of plots in each row, when the
same_window parameter is True.
See Also
--------
plot_decomposition_loadings, plot_decomposition_results.
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2. "
"You can use "
"`plot_decomposition_results` instead.")
if same_window is None:
same_window = True
factors = self.learning_results.factors
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"``comp_ids`` argument")
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('Decomposition factors of',
same_window)
return self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
cmap=cmap,
per_row=per_row)
def plot_bss_factors(self, comp_ids=None, calibrate=True,
same_window=True, comp_label=None,
per_row=3, title=None):
"""Plot factors from blind source separation results. In case of 1D
signal axis, each factors line can be toggled on and off by clicking
on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list of ints
if None, returns maps of all components.
if int, returns maps of components with ids from 0 to
given int.
if list of ints, returns maps of components with ids in
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default is True.
title : string
Title of the plot.
cmap : The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int, the number of plots in each row, when the
same_window
parameter is True.
See Also
--------
plot_bss_loadings, plot_bss_results.
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError("This method cannot plot factors of "
"signals of dimension higher than 2. "
"You can use "
"`plot_bss_results` instead.")
if same_window is None:
same_window = True
factors = self.learning_results.bss_factors
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('BSS factors of', same_window)
return self._plot_factors_or_pchars(factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
per_row=per_row)
def plot_decomposition_loadings(self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor='all',
title=None):
"""Plot loadings from a decomposition. In case of 1D navigation axis,
each loading line can be toggled on and off by clicking on its
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list of ints
if None (default), returns maps of all components if the output_dimension was defined when
executing ``decomposition``. Otherwise it raises a ValueError.
if int, returns maps of components with ids from 0 to
given int.
if list of ints, returns maps of components with ids in
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default is True.
title : string
Title of the plot.
with_factors : bool
If True, also returns figure(s) with the factors for the
given comp_ids.
cmap : matplotlib colormap
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If True, removes NaN's from the loading plots.
per_row : int
the number of plots in each row, when the same_window
parameter is True.
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
See Also
--------
plot_decomposition_factors, plot_decomposition_results.
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot loadings of "
"dimension higher than 2. "
"You can use "
"`plot_decomposition_results` instead.")
if same_window is None:
same_window = True
loadings = self.learning_results.loadings.T
if with_factors:
factors = self.learning_results.factors
else:
factors = None
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"``comp_ids`` argument")
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('Decomposition loadings of',
same_window)
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def plot_bss_loadings(self, comp_ids=None, calibrate=True,
same_window=True, comp_label=None,
with_factors=False, cmap=plt.cm.gray,
no_nans=False, per_row=3, axes_decor='all',
title=None):
"""Plot loadings from blind source separation results. In case of 1D
navigation axis, each loading line can be toggled on and off by
clicking on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list of ints
if None, returns maps of all components.
if int, returns maps of components with ids from 0 to
given int.
if list of ints, returns maps of components with ids in
given list.
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window. They are
not scaled. Default is True.
title : string
Title of the plot.
with_factors : bool
If True, also returns figure(s) with the factors for the
given comp_ids.
cmap : matplotlib colormap
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If True, removes NaN's from the loading plots.
per_row : int
the number of plots in each row, when the same_window
parameter is True.
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks / labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
See Also
--------
plot_bss_factors, plot_bss_results.
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError("This method cannot plot loadings of "
"dimension higher than 2. "
"You can use "
"`plot_bss_results` instead.")
if same_window is None:
same_window = True
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title('BSS loadings of',
same_window)
loadings = self.learning_results.bss_loadings.T
if with_factors:
factors = self.learning_results.bss_factors
else:
factors = None
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor)
def _get_plot_title(self, base_title='Loadings', same_window=True):
title_md = self.metadata.General.title
title = "%s %s" % (base_title, title_md)
if title_md == '': # remove the 'of' if 'title' is a empty string
title = title.replace(' of ', '')
if not same_window:
title = title.replace('loadings', 'loading')
return title
def export_decomposition_results(self, comp_ids=None,
folder=None,
calibrate=True,
factor_prefix='factor',
factor_format="hspy",
loading_prefix='loading',
loading_format="hspy",
comp_label=None,
cmap=plt.cm.gray,
same_window=False,
multiple_files=True,
no_nans=True,
per_row=3,
save_figures=False,
save_figures_format='png'):
"""Export results from a decomposition to any of the supported
formats.
Parameters
----------
comp_ids : None, int, or list of ints
if None, returns all components/loadings.
if int, returns components/loadings with ids from 0 to
given int.
if list of ints, returns components/loadings with ids in
given list.
folder : str or None
The path to the folder where the file will be saved.
If `None` the
current folder is used by default.
factor_prefix : string
The prefix that any exported filenames for
factors/components
begin with
factor_format : string
The extension of the format that you wish to save to. Default is
"hspy". See `loading_format` for more details.
loading_prefix : string
The prefix that any exported filenames for
factors/components
begin with
loading_format : string
The extension of the format that you wish to save to. default
is "hspy". The format determines the kind of output.
- For image formats (tif, png, jpg, etc.), plots are
created using the plotting flags as below, and saved at
600 dpi. One plot per loading is saved.
- For multidimensional formats ("rpl", "hspy"), arrays are
saved in single files. All loadings are contained in the
one file.
- For spectral formats (msa), each loading is saved to a
separate file.
multiple_files : bool
If True, one file per factor and one file per loading will
be created when exporting. Otherwise only two files will be created, one for
the factors and another for the loadings. The default value can
be chosen in the preferences.
save_figures : bool
If True the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Plotting options (for save_figures = True ONLY)
----------------------------------------------
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window.
comp_label : string, the label that is either the plot title
(if plotting in separate windows) or the label in the legend
(if plotting in the same window)
cmap : The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int, the number of plots in each row, when the
same_window
parameter is True.
save_figures_format : str
The image format extension.
See Also
--------
get_decomposition_factors,
get_decomposition_loadings.
"""
factors = self.learning_results.factors
loadings = self.learning_results.loadings.T
self._export_factors(
factors,
folder=folder,
comp_ids=comp_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=factor_prefix,
factor_format=factor_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(
loadings,
comp_ids=comp_ids, folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=loading_prefix,
loading_format=loading_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row)
def export_bss_results(self,
comp_ids=None,
folder=None,
calibrate=True,
multiple_files=True,
save_figures=False,
factor_prefix='bss_factor',
factor_format="hspy",
loading_prefix='bss_loading',
loading_format="hspy",
comp_label=None, cmap=plt.cm.gray,
same_window=False,
no_nans=True,
per_row=3,
save_figures_format='png'):
"""Export results from ICA to any of the supported formats.
Parameters
----------
comp_ids : None, int, or list of ints
if None, returns all components/loadings.
if int, returns components/loadings with ids from 0 to given
int.
if list of ints, returns components/loadings with ids in
given list.
folder : str or None
The path to the folder where the file will be saved. If
`None` the
current folder is used by default.
factor_prefix : string
The prefix that any exported filenames for
factors/components
begin with
factor_format : string
The extension of the format that you wish to save to. Default is
"hspy". See `loading_format` for more details.
loading_prefix : string
The prefix that any exported filenames for
factors/components
begin with
loading_format : string
The extension of the format that you wish to save to. default
is "hspy". The format determines the kind of output.
- For image formats (tif, png, jpg, etc.), plots are
created using the plotting flags as below, and saved at
600 dpi. One plot per loading is saved.
- For multidimensional formats ("rpl", "hspy"), arrays are
saved in single files. All loadings are contained in the
one file.
- For spectral formats (msa), each loading is saved to a
separate file.
multiple_files : Bool
If True, one file per factor and one file per loading
will be created when exporting. Otherwise only two files will be created, one
for the factors and another for the loadings. Default is True.
save_figures : Bool
If True the same figures that are obtained when using the plot
methods will be saved with 600 dpi resolution
Plotting options (for save_figures = True ONLY)
----------------------------------------------
calibrate : bool
if True, calibrates plots where calibration is available
from the axes_manager. If False, plots are in pixels/channels.
same_window : bool
if True, plots each factor to the same window.
comp_label : string
the label that is either the plot title (if plotting in
separate windows) or the label in the legend (if plotting in the
same window)
cmap : The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int, the number of plots in each row, when the
same_window
parameter is True.
save_figures_format : str
The image format extension.
See Also
--------
get_bss_factors,
get_bss_loadings.
"""
factors = self.learning_results.bss_factors
loadings = self.learning_results.bss_loadings.T
self._export_factors(factors,
folder=folder,
comp_ids=comp_ids,
calibrate=calibrate,
multiple_files=multiple_files,
factor_prefix=factor_prefix,
factor_format=factor_format,
comp_label=comp_label,
save_figures=save_figures,
cmap=cmap,
no_nans=no_nans,
same_window=same_window,
per_row=per_row,
save_figures_format=save_figures_format)
self._export_loadings(loadings,
comp_ids=comp_ids,
folder=folder,
calibrate=calibrate,
multiple_files=multiple_files,
loading_prefix=loading_prefix,
loading_format=loading_format,
comp_label=comp_label,
cmap=cmap,
save_figures=save_figures,
same_window=same_window,
no_nans=no_nans,
per_row=per_row,
save_figures_format=save_figures_format)
def _get_loadings(self, loadings):
from hyperspy.api import signals
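# The component index becomes the single navigation axis of the returned signal; the
# original navigation axes become its signal axes.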
data = loadings.T.reshape(
(-1,) + self.axes_manager.navigation_shape[::-1])
signal = signals.BaseSignal(
data,
axes=(
[{"size": data.shape[0], "navigate": True}] +
self.axes_manager._get_navigation_axes_dicts()))
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
def _get_factors(self, factors):
signal = self.__class__(
factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
axes=[{"size": factors.shape[-1], "navigate": True}] +
self.axes_manager._get_signal_axes_dicts())
signal.set_signal_type(self.metadata.Signal.signal_type)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
def get_decomposition_loadings(self):
"""Return the decomposition loadings as a Signal.
See Also
-------
get_decomposition_factors, export_decomposition_results.
"""
signal = self._get_loadings(self.learning_results.loadings)
signal.axes_manager._axes[0].name = "Decomposition component index"
signal.metadata.General.title = "Decomposition loadings of " + \
self.metadata.General.title
return signal
def get_decomposition_factors(self):
"""Return the decomposition factors as a Signal.
See Also
-------
get_decomposition_loadings, export_decomposition_results.
"""
signal = self._get_factors(self.learning_results.factors)
signal.axes_manager._axes[0].name = "Decomposition component index"
signal.metadata.General.title = ("Decomposition factors of " +
self.metadata.General.title)
return signal
def get_bss_loadings(self):
"""Return the blind source separation loadings as a Signal.
See Also
-------
get_bss_factors, export_bss_results.
"""
signal = self._get_loadings(
self.learning_results.bss_loadings)
signal.axes_manager[0].name = "BSS component index"
signal.metadata.General.title = ("BSS loadings of " +
self.metadata.General.title)
return signal
def get_bss_factors(self):
"""Return the blind source separation factors as a Signal.
See Also
-------
get_bss_loadings, export_bss_results.
"""
signal = self._get_factors(self.learning_results.bss_factors)
signal.axes_manager[0].name = "BSS component index"
signal.metadata.General.title = ("BSS factors of " +
self.metadata.General.title)
return signal
def plot_bss_results(self,
factors_navigator="smart_auto",
loadings_navigator="smart_auto",
factors_dim=2,
loadings_dim=2,):
"""Plot the blind source separation factors and loadings.
Unlike `plot_bss_factors` and `plot_bss_loadings`, this method displays
one component at a time. Therefore it provides a more compact
visualization than the other two methods. The loadings and factors
are displayed in different windows and each has its own
navigator/sliders to navigate them if they are multidimensional. The
component index axis is synchronized between the two.
Parameters
----------
factors_navigator, loadings_navigator : {"smart_auto", "auto", None, "spectrum",
Signal}
"smart_auto" (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options
see `plot` documentation for details.
factors_dim, loadings_dim: int
Currently HyperSpy cannot plot signals of dimension higher than
two. Therefore, to visualize the BSS results when the
factors or the loadings have signal dimension greater than 2
we can view the data as spectra(images) by setting this parameter
to 1(2). (Default 2)
See Also
--------
plot_bss_factors, plot_bss_loadings, plot_decomposition_results.
"""
factors = self.get_bss_factors()
loadings = self.get_bss_loadings()
_plot_x_results(factors=factors, loadings=loadings,
factors_navigator=factors_navigator,
loadings_navigator=loadings_navigator,
factors_dim=factors_dim,
loadings_dim=loadings_dim)
def plot_decomposition_results(self,
factors_navigator="smart_auto",
loadings_navigator="smart_auto",
factors_dim=2,
loadings_dim=2):
"""Plot the decomposition factors and loadings.
Unlike `plot_factors` and `plot_loadings`, this method displays
one component at a time. Therefore it provides a more compact
visualization than the other two methods. The loadings and factors
are displayed in different windows and each has its own
navigator/sliders to navigate them if they are multidimensional. The
component index axis is synchronized between the two.
Parameters
----------
factors_navigator, loadings_navigator : {"smart_auto", "auto", None, "spectrum",
Signal}
"smart_auto" (default) displays sliders if the navigation
dimension is less than 3. For a description of the other options
see `plot` documentation for details.
factors_dim, loadings_dim : int
Currently HyperSpy cannot plot signals of dimension higher than
two. Therefore, to visualize the BSS results when the
factors or the loadings have signal dimension greater than 2
we can view the data as spectra(images) by setting this parameter
to 1(2). (Default 2)
See Also
--------
plot_factors, plot_loadings, plot_bss_results.
"""
factors = self.get_decomposition_factors()
loadings = self.get_decomposition_loadings()
_plot_x_results(factors=factors, loadings=loadings,
factors_navigator=factors_navigator,
loadings_navigator=loadings_navigator,
factors_dim=factors_dim,
loadings_dim=loadings_dim)
def _plot_x_results(factors, loadings, factors_navigator, loadings_navigator,
factors_dim, loadings_dim):
factors.axes_manager._axes[0] = loadings.axes_manager._axes[0]
if loadings.axes_manager.signal_dimension > 2:
loadings.axes_manager.set_signal_dimension(loadings_dim)
if factors.axes_manager.signal_dimension > 2:
factors.axes_manager.set_signal_dimension(factors_dim)
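# 'smart_auto' uses plain sliders while the navigation dimension is small (< 3) and falls
# back to the automatic navigator choice otherwise; the factors then share the component
# axis with the loadings and need no navigator of their own.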
if (loadings_navigator == "smart_auto" and
loadings.axes_manager.navigation_dimension < 3):
loadings_navigator = "slider"
else:
loadings_navigator = "auto"
if (factors_navigator == "smart_auto" and
(factors.axes_manager.navigation_dimension < 3 or
loadings_navigator is not None)):
factors_navigator = None
else:
factors_navigator = "auto"
loadings.plot(navigator=loadings_navigator)
factors.plot(navigator=factors_navigator)
def _change_API_comp_label(title, comp_label):
if comp_label is not None:
if title is None:
title = comp_label
warnings.warn("The 'comp_label' argument will be deprecated "
"in 2.0, please use 'title' instead",
VisibleDeprecationWarning)
else:
warnings.warn("The 'comp_label' argument will be deprecated "
"in 2.0. Since you are already using the 'title' "
"argument, 'comp_label' is ignored.",
VisibleDeprecationWarning)
return title
class SpecialSlicersSignal(SpecialSlicers):
def __setitem__(self, i, j):
"""x.__setitem__(i, y) <==> x[i]=y
"""
if isinstance(j, BaseSignal):
j = j.data
array_slices = self.obj._get_array_slices(i, self.isNavigation)
self.obj.data[array_slices] = j
def __len__(self):
return self.obj.axes_manager.signal_shape[0]
class BaseSetMetadataItems(t.HasTraits):
def __init__(self, signal):
for key, value in self.mapping.items():
if signal.metadata.has_item(key):
setattr(self, value, signal.metadata.get_item(key))
self.signal = signal
def store(self, *args, **kwargs):
for key, value in self.mapping.items():
if getattr(self, value) != t.Undefined:
self.signal.metadata.set_item(key, getattr(self, value))
class BaseSignal(FancySlicing,
MVA,
MVATools,):
_dtype = "real"
_signal_dimension = -1
_signal_type = ""
_lazy = False
_alias_signal_types = []
_additional_slicing_targets = [
"metadata.Signal.Noise_properties.variance",
]
def __init__(self, data, **kwds):
"""Create a Signal from a numpy array.
Parameters
----------
data : numpy array
The signal data. It can be an array of any dimensions.
axes : dictionary (optional)
Dictionary to define the axes (see the
documentation of the AxesManager class for more details).
attributes : dictionary (optional)
A dictionary whose items are stored as attributes.
metadata : dictionary (optional)
A dictionary containing a set of parameters
that will be stored in the `metadata` attribute.
Some parameters might be mandatory in some cases.
original_metadata : dictionary (optional)
A dictionary containing a set of parameters
that will be stored in the `original_metadata` attribute. It
typically contains all the parameters that have been
imported from the original data file.
"""
self._create_metadata()
self.models = ModelManager(self)
self.learning_results = LearningResults()
kwds['data'] = data
self._load_dictionary(kwds)
self._plot = None
self.inav = SpecialSlicersSignal(self, True)
self.isig = SpecialSlicersSignal(self, False)
self.events = Events()
self.events.data_changed = Event("""
Event that triggers when the data has changed
The event triggers when the data is ready for consumption by any
process that depends on it as input. Plotted signals automatically
connect this Event to its `BaseSignal.plot()`.
Note: The event only fires at certain specific times, not every time
that the `BaseSignal.data` array changes values.
Arguments:
obj: The signal that owns the data.
""", arguments=['obj'])
def _create_metadata(self):
self.metadata = DictionaryTreeBrowser()
mp = self.metadata
mp.add_node("_HyperSpy")
mp.add_node("General")
mp.add_node("Signal")
mp._HyperSpy.add_node("Folding")
folding = mp._HyperSpy.Folding
folding.unfolded = False
folding.signal_unfolded = False
folding.original_shape = None
folding.original_axes_manager = None
mp.Signal.binned = False
self.original_metadata = DictionaryTreeBrowser()
self.tmp_parameters = DictionaryTreeBrowser()
def __repr__(self):
if self.metadata._HyperSpy.Folding.unfolded:
unfolded = "unfolded "
else:
unfolded = ""
string = '<'
string += self.__class__.__name__
string += ", title: %s" % self.metadata.General.title
string += ", %sdimensions: %s" % (
unfolded,
self.axes_manager._get_dimension_str())
string += '>'
return string
def _binary_operator_ruler(self, other, op_name):
exception_message = (
"Invalid dimensions for this operation")
if isinstance(other, BaseSignal):
# Both objects are signals
oam = other.axes_manager
sam = self.axes_manager
if sam.navigation_shape == oam.navigation_shape and \
sam.signal_shape == oam.signal_shape:
# They have the same signal shape.
# The signal axes are aligned but there is
# no guarantee that the data axes are aligned, so we make sure that
# they are aligned for the operation.
sdata = self._data_aligned_with_axes
odata = other._data_aligned_with_axes
if op_name in INPLACE_OPERATORS:
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns = self._deepcopy_with_new_data(
getattr(sdata, op_name)(odata))
ns.axes_manager._sort_axes()
return ns
else:
# Different navigation and/or signal shapes
if not are_signals_aligned(self, other):
raise ValueError(exception_message)
else:
# They are broadcastable but have different number of axes
ns, no = broadcast_signals(self, other)
sdata = ns.data
odata = no.data
if op_name in INPLACE_OPERATORS:
# This should raise a ValueError if the operation
# changes the shape of the object on the left.
self.data = getattr(sdata, op_name)(odata)
self.axes_manager._sort_axes()
return self
else:
ns.data = getattr(sdata, op_name)(odata)
return ns
else:
# Second object is not a Signal
if op_name in INPLACE_OPERATORS:
getattr(self.data, op_name)(other)
return self
else:
return self._deepcopy_with_new_data(
getattr(self.data, op_name)(other))
def _unary_operator_ruler(self, op_name):
return self._deepcopy_with_new_data(getattr(self.data, op_name)())
def _check_signal_dimension_equals_one(self):
if self.axes_manager.signal_dimension != 1:
raise SignalDimensionError(self.axes_manager.signal_dimension, 1)
def _check_signal_dimension_equals_two(self):
if self.axes_manager.signal_dimension != 2:
raise SignalDimensionError(self.axes_manager.signal_dimension, 2)
def _deepcopy_with_new_data(self, data=None, copy_variance=False):
"""Returns a deepcopy of itself replacing the data.
This method has the advantage over deepcopy that it does not
copy the data, which can save precious memory.
Parameters
----------
data : {None | np.array}
Returns
-------
ns : Signal
"""
old_np = None
try:
old_data = self.data
self.data = None
old_plot = self._plot
self._plot = None
old_models = self.models._models
if not copy_variance and "Noise_properties" in self.metadata.Signal:
old_np = self.metadata.Signal.Noise_properties
del self.metadata.Signal.Noise_properties
self.models._models = DictionaryTreeBrowser()
ns = self.deepcopy()
ns.data = data
return ns
finally:
self.data = old_data
self._plot = old_plot
self.models._models = old_models
if old_np is not None:
self.metadata.Signal.Noise_properties = old_np
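# Note on the method above: the try/finally block temporarily detaches
# `data`, `_plot`, the stored models and (optionally) the noise variance so
# that self.deepcopy() only copies the lightweight attributes; everything
# that was detached is restored on the original object before returning.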
def as_lazy(self, copy_variance=True):
res = self._deepcopy_with_new_data(self.data,
copy_variance=copy_variance)
res._lazy = True
res._assign_subclass()
return res
def _summary(self):
string = "\n\tTitle: "
string += self.metadata.General.title
if self.metadata.has_item("Signal.signal_type"):
string += "\n\tSignal type: "
string += self.metadata.Signal.signal_type
string += "\n\tData dimensions: "
string += str(self.axes_manager.shape)
string += "\n\tData type: "
string += str(self.data.dtype)
return string
def _print_summary(self):
print(self._summary())
@property
def data(self):
return self._data
@data.setter
def data(self, value):
from dask.array import Array
if isinstance(value, Array):
if not value.ndim:
value = value.reshape((1,))
self._data = value
else:
self._data = np.atleast_1d(np.asanyarray(value))
def _load_dictionary(self, file_data_dict):
"""Load data from dictionary.
Parameters
----------
file_data_dict : dictionary
A dictionary containing at least a 'data' keyword with an array of
arbitrary dimensions. Additionally the dictionary can contain the
following items:
data : numpy array
The signal data. It can be an array of any dimensions.
axes : dictionary (optional)
Dictionary to define the axes (see the
documentation of the AxesManager class for more details).
attributes : dictionary (optional)
A dictionary whose items are stored as attributes.
metadata : dictionary (optional)
A dictionary containing a set of parameters
that will be stored in the `metadata` attribute.
Some parameters might be mandatory in some cases.
original_metadata : dictionary (optional)
A dictionary containing a set of parameters
that will be stored in the `original_metadata` attribute. It
typically contains all the parameters that have been
imported from the original data file.
"""
self.data = file_data_dict['data']
oldlazy = self._lazy
if 'models' in file_data_dict:
self.models._add_dictionary(file_data_dict['models'])
if 'axes' not in file_data_dict:
file_data_dict['axes'] = self._get_undefined_axes_list()
self.axes_manager = AxesManager(
file_data_dict['axes'])
if 'metadata' not in file_data_dict:
file_data_dict['metadata'] = {}
if 'original_metadata' not in file_data_dict:
file_data_dict['original_metadata'] = {}
if 'attributes' in file_data_dict:
for key, value in file_data_dict['attributes'].items():
if hasattr(self, key):
if isinstance(value, dict):
for k, v in value.items():
setattr(getattr(self, key), k, v)
else:
setattr(self, key, value)
self.original_metadata.add_dictionary(
file_data_dict['original_metadata'])
self.metadata.add_dictionary(
file_data_dict['metadata'])
if "title" not in self.metadata.General:
self.metadata.General.title = ''
if (self._signal_type or not self.metadata.has_item("Signal.signal_type")):
self.metadata.Signal.signal_type = self._signal_type
if "learning_results" in file_data_dict:
self.learning_results.__dict__.update(
file_data_dict["learning_results"])
if self._lazy is not oldlazy:
self._assign_subclass()
# TODO: try to find a way to use dask ufuncs when called with lazy data (e.g.
# np.log(s) -> da.log(s.data) wrapped.
def __array__(self, dtype=None):
if dtype:
return self.data.astype(dtype)
else:
return self.data
def __array_wrap__(self, array, context=None):
signal = self._deepcopy_with_new_data(array)
if context is not None:
# ufunc, argument of the ufunc, domain of the ufunc
# In ufuncs with multiple outputs, domain indicates which output
# is currently being prepared (eg. see modf).
# In ufuncs with a single output, domain is 0
uf, objs, huh = context
def get_title(signal, i=0):
g = signal.metadata.General
if g.title:
return g.title
else:
return "Untitled Signal %s" % (i + 1)
title_strs = []
i = 0
for obj in objs:
if isinstance(obj, BaseSignal):
title_strs.append(get_title(obj, i))
i += 1
else:
title_strs.append(str(obj))
signal.metadata.General.title = "%s(%s)" % (
uf.__name__, ", ".join(title_strs))
return signal
def squeeze(self):
"""Remove single-dimensional entries from the shape of an array
and the axes.
"""
# We deepcopy everything but data
self = self._deepcopy_with_new_data(self.data)
for axis in self.axes_manager._axes:
if axis.size == 1:
self._remove_axis(axis.index_in_axes_manager)
self.data = self.data.squeeze()
return self
def _to_dictionary(self, add_learning_results=True, add_models=False):
"""Returns a dictionary that can be used to recreate the signal.
All items but `data` are copies.
Parameters
----------
add_learning_results : bool
Returns
-------
dic : dictionary
"""
dic = {'data': self.data,
'axes': self.axes_manager._get_axes_dicts(),
'metadata': self.metadata.deepcopy().as_dictionary(),
'original_metadata':
self.original_metadata.deepcopy().as_dictionary(),
'tmp_parameters':
self.tmp_parameters.deepcopy().as_dictionary(),
'attributes': {'_lazy': self._lazy},
}
if add_learning_results and hasattr(self, 'learning_results'):
dic['learning_results'] = copy.deepcopy(
self.learning_results.__dict__)
if add_models:
dic['models'] = self.models._models.as_dictionary()
return dic
def _get_undefined_axes_list(self):
axes = []
for s in self.data.shape:
axes.append({'size': int(s), })
return axes
def __call__(self, axes_manager=None):
if axes_manager is None:
axes_manager = self.axes_manager
return np.atleast_1d(
self.data.__getitem__(axes_manager._getitem_tuple))
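# Illustrative sketch (assumes `hs` is hyperspy.api and `np` is numpy):
# calling a signal returns the data at the current axes_manager indices,
# which is what the plotting machinery uses via `signal_data_function`.
# >>> s = hs.signals.Signal1D(np.arange(12.).reshape(3, 4))
# >>> s.axes_manager.indices = (1,)
# >>> s()
# array([ 4.,  5.,  6.,  7.])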
def plot(self, navigator="auto", axes_manager=None,
plot_markers=True, **kwargs):
"""%s
%s
"""
if self._plot is not None:
try:
self._plot.close()
except:
# If it was already closed it will raise an exception,
# but we want to carry on...
pass
if axes_manager is None:
axes_manager = self.axes_manager
if self.is_rgbx is True:
if axes_manager.navigation_size < 2:
navigator = None
else:
navigator = "slider"
if axes_manager.signal_dimension == 0:
self._plot = mpl_he.MPL_HyperExplorer()
elif axes_manager.signal_dimension == 1:
# Hyperspectrum
self._plot = mpl_hse.MPL_HyperSignal1D_Explorer()
elif axes_manager.signal_dimension == 2:
self._plot = mpl_hie.MPL_HyperImage_Explorer()
else:
raise ValueError(
"Plotting is not supported for this view. "
"Try e.g. 's.transpose(signal_axes=1).plot()' for "
"plotting as a 1D signal, or "
"'s.transpose(signal_axes=(1,2)).plot()' "
"for plotting as a 2D signal.")
self._plot.axes_manager = axes_manager
self._plot.signal_data_function = self.__call__
if self.metadata.General.title:
self._plot.signal_title = self.metadata.General.title
elif self.tmp_parameters.has_item('filename'):
self._plot.signal_title = self.tmp_parameters.filename
if self.metadata.has_item("Signal.quantity"):
self._plot.quantity_label = self.metadata.Signal.quantity
def get_static_explorer_wrapper(*args, **kwargs):
return navigator()
def get_1D_sum_explorer_wrapper(*args, **kwargs):
navigator = self
# Sum over all but the first navigation axis.
am = navigator.axes_manager
navigator = navigator.sum(am.signal_axes + am.navigation_axes[1:])
return np.nan_to_num(navigator.data).squeeze()
def get_dynamic_explorer_wrapper(*args, **kwargs):
navigator.axes_manager.indices = self.axes_manager.indices[
navigator.axes_manager.signal_dimension:]
navigator.axes_manager._update_attributes()
if np.issubdtype(navigator().dtype, complex):
return np.abs(navigator())
else:
return navigator()
if not isinstance(navigator, BaseSignal) and navigator == "auto":
if (self.axes_manager.navigation_dimension == 1 and
self.axes_manager.signal_dimension == 1):
navigator = "data"
elif self.axes_manager.navigation_dimension > 0:
if self.axes_manager.signal_dimension == 0:
navigator = self.deepcopy()
else:
navigator = interactive(
self.sum,
self.events.data_changed,
self.axes_manager.events.any_axis_changed,
self.axes_manager.signal_axes)
if navigator.axes_manager.navigation_dimension == 1:
navigator = interactive(
navigator.as_signal1D,
navigator.events.data_changed,
navigator.axes_manager.events.any_axis_changed, 0)
else:
navigator = interactive(
navigator.as_signal2D,
navigator.events.data_changed,
navigator.axes_manager.events.any_axis_changed,
(0, 1))
else:
navigator = None
# Navigator properties
if axes_manager.navigation_axes:
if navigator == "slider":
self._plot.navigator_data_function = "slider"
elif navigator is None:
self._plot.navigator_data_function = None
elif isinstance(navigator, BaseSignal):
# Dynamic navigator
if (axes_manager.navigation_shape ==
navigator.axes_manager.signal_shape +
navigator.axes_manager.navigation_shape):
self._plot.navigator_data_function = get_dynamic_explorer_wrapper
elif (axes_manager.navigation_shape ==
navigator.axes_manager.signal_shape or
axes_manager.navigation_shape[:2] ==
navigator.axes_manager.signal_shape or
(axes_manager.navigation_shape[0],) ==
navigator.axes_manager.signal_shape):
self._plot.navigator_data_function = get_static_explorer_wrapper
else:
raise ValueError(
"The navigator dimensions are not compatible with "
"those of self.")
elif navigator == "data":
if np.issubdtype(self.data.dtype, complex):
self._plot.navigator_data_function = lambda axes_manager=None: np.abs(
self.data)
else:
self._plot.navigator_data_function = lambda axes_manager=None: self.data
elif navigator == "spectrum":
self._plot.navigator_data_function = get_1D_sum_explorer_wrapper
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
self._plot.plot(**kwargs)
self.events.data_changed.connect(self.update_plot, [])
if self._plot.signal_plot:
self._plot.signal_plot.events.closed.connect(
lambda: self.events.data_changed.disconnect(self.update_plot),
[])
if plot_markers:
if self.metadata.has_item('Markers'):
self._plot_permanent_markers()
plot.__doc__ %= BASE_PLOT_DOCSTRING, KWARGS_DOCSTRING
def save(self, filename=None, overwrite=None, extension=None,
**kwds):
"""Saves the signal in the specified format.
The function gets the format from the extension:
- hspy for HyperSpy's HDF5 specification
- rpl for Ripple (useful to export to Digital Micrograph)
- msa for EMSA/MSA single spectrum saving.
- unf for SEMPER unf binary format.
- blo for Blockfile diffraction stack saving.
- Many image formats such as png, tiff, jpeg...
If no extension is provided the default file format as defined
in the `preferences` is used.
Please note that not all formats support saving datasets of
arbitrary dimensions, e.g. msa only supports 1D data, and blockfiles
only support image stacks with a navigation dimension < 2.
Each format accepts a different set of parameters. For details
see the specific format documentation.
Parameters
----------
filename : str or None
If None (default) and `tmp_parameters.filename` and
`tmp_parameters.folder` are defined, the
filename and path will be taken from there. A valid
extension can be provided e.g. "my_file.rpl", see `extension`.
overwrite : None, bool
If None, if the file exists it will query the user. If
True(False) it (does not) overwrites the file if it exists.
extension : {None, 'hspy', 'hdf5', 'rpl', 'msa', 'unf', 'blo',
'emd', common image extensions e.g. 'tiff', 'png'}
The extension of the file that defines the file format.
'hspy' and 'hdf5' are equivalent. Use 'hdf5' if compatibility with
HyperSpy versions older than 1.2 is required.
If None, the extension is determined from the following list in
this order:
i) the filename
ii) `Signal.tmp_parameters.extension`
iii) `hspy` (the default extension)
"""
if filename is None:
if (self.tmp_parameters.has_item('filename') and
self.tmp_parameters.has_item('folder')):
filename = os.path.join(
self.tmp_parameters.folder,
self.tmp_parameters.filename)
extension = (self.tmp_parameters.extension
if not extension
else extension)
elif self.metadata.has_item('General.original_filename'):
filename = self.metadata.General.original_filename
else:
raise ValueError('File name not defined')
if extension is not None:
basename, ext = os.path.splitext(filename)
filename = basename + '.' + extension
io.save(filename, self, overwrite=overwrite, **kwds)
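# Usage sketch for save() (the file names below are hypothetical examples):
# >>> s.save('my_signal')                    # default 'hspy' format
# >>> s.save('my_spectrum.msa')              # format inferred from the name
# >>> s.save('my_signal', extension='hdf5')  # pre-1.2 compatible HDF5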
def _replot(self):
if self._plot is not None:
if self._plot.is_active() is True:
self.plot()
def update_plot(self):
if self._plot is not None:
if self._plot.is_active() is True:
if self._plot.signal_plot is not None:
self._plot.signal_plot.update()
if self._plot.navigator_plot is not None:
self._plot.navigator_plot.update()
def get_dimensions_from_data(self):
"""Get the dimension parameters from the data_cube. Useful when
the data_cube was externally modified, or when the SI was not
loaded from a file
"""
dc = self.data
for axis in self.axes_manager._axes:
axis.size = int(dc.shape[axis.index_in_array])
def crop(self, axis, start=None, end=None):
"""Crops the data in a given axis. The range is given in pixels
Parameters
----------
axis : {int | string}
Specify the data axis in which to perform the cropping
operation. The axis can be specified using the index of the
axis in `axes_manager` or the axis name.
start, end : {int | float | None}
The beginning and end of the cropping interval. If int
the value is taken as the axis index. If float the index
is calculated using the axis calibration. If start/end is
None crop from/to the low/high end of the axis.
"""
axis = self.axes_manager[axis]
i1, i2 = axis._get_index(start), axis._get_index(end)
if i1 is not None:
new_offset = axis.axis[i1]
# We take a copy to guarantee the continuity of the data
self.data = self.data[
(slice(None),) * axis.index_in_array + (slice(i1, i2),
Ellipsis)]
if i1 is not None:
axis.offset = new_offset
self.get_dimensions_from_data()
self.squeeze()
self.events.data_changed.trigger(obj=self)
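# Illustrative sketch of crop() (assumes `hs` is hyperspy.api and `np` is
# numpy): integer bounds are taken as indices, floats as calibrated values.
# >>> s = hs.signals.Signal1D(np.arange(100.))
# >>> s.crop(0, 10, 50)          # keep indices 10..49 of the signal axis
# >>> s.axes_manager[0].size
# 40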
def swap_axes(self, axis1, axis2):
"""Swaps the axes.
Parameters
----------
axis1, axis2 %s
Returns
-------
s : a copy of the object with the axes swapped.
"""
axis1 = self.axes_manager[axis1].index_in_array
axis2 = self.axes_manager[axis2].index_in_array
s = self._deepcopy_with_new_data(self.data.swapaxes(axis1, axis2))
am = s.axes_manager
am._update_trait_handlers(remove=True)
c1 = am._axes[axis1]
c2 = am._axes[axis2]
c1.slice, c2.slice = c2.slice, c1.slice
c1.navigate, c2.navigate = c2.navigate, c1.navigate
am._axes[axis1] = c2
am._axes[axis2] = c1
am._update_attributes()
am._update_trait_handlers(remove=False)
s._make_sure_data_is_contiguous()
return s
swap_axes.__doc__ %= ONE_AXIS_PARAMETER
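# Illustrative sketch of swap_axes() (assumes `hs` and `np` as above):
# swapping two navigation axes returns a copy with the corresponding data
# axes exchanged and the calibrations carried along.
# >>> s = hs.signals.Signal1D(np.ones((3, 4, 5)))
# >>> s
# <Signal1D, title: , dimensions: (4, 3|5)>
# >>> s.swap_axes(0, 1)
# <Signal1D, title: , dimensions: (3, 4|5)>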
def rollaxis(self, axis, to_axis):
"""Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
axis %s The axis to roll backwards.
The positions of the other axes do not change relative to one another.
to_axis %s The axis is rolled until it
lies before this other axis.
Returns
-------
s : Signal or subclass
Output signal.
See Also
--------
roll : swap_axes
Examples
--------
>>> s = hs.signals.Signal1D(np.ones((5,4,3,6)))
>>> s
<Signal1D, title: , dimensions: (3, 4, 5, 6)>
>>> s.rollaxis(3, 1)
<Signal1D, title: , dimensions: (3, 4, 5, 6)>
>>> s.rollaxis(2,0)
<Signal1D, title: , dimensions: (5, 3, 4, 6)>
"""
axis = self.axes_manager[axis].index_in_array
to_index = self.axes_manager[to_axis].index_in_array
if axis == to_index:
return self.deepcopy()
new_axes_indices = hyperspy.misc.utils.rollelem(
[axis_.index_in_array for axis_ in self.axes_manager._axes],
index=axis,
to_index=to_index)
s = self._deepcopy_with_new_data(self.data.transpose(new_axes_indices))
s.axes_manager._axes = hyperspy.misc.utils.rollelem(
s.axes_manager._axes,
index=axis,
to_index=to_index)
s.axes_manager._update_attributes()
s._make_sure_data_is_contiguous()
return s
rollaxis.__doc__ %= (ONE_AXIS_PARAMETER, ONE_AXIS_PARAMETER)
@property
def _data_aligned_with_axes(self):
"""Returns a view of `data` with is axes aligned with the Signal axes.
"""
if self.axes_manager.axes_are_aligned_with_data:
return self.data
else:
am = self.axes_manager
nav_iia_r = am.navigation_indices_in_array[::-1]
sig_iia_r = am.signal_indices_in_array[::-1]
# nav_sort = np.argsort(nav_iia_r)
# sig_sort = np.argsort(sig_iia_r) + len(nav_sort)
data = self.data.transpose(nav_iia_r + sig_iia_r)
return data
def _validate_rebin_args_and_get_factors(self, new_shape=None, scale=None):
if new_shape is None and scale is None:
raise ValueError("One of new_shape, or scale must be specified")
elif new_shape is None and scale is None:
raise ValueError(
"Only one out of new_shape or scale should be specified. "
"Not both.")
elif new_shape:
if len(new_shape) != len(self.data.shape):
raise ValueError("Wrong new_shape size")
new_shape_in_array = np.array([new_shape[axis.index_in_axes_manager]
for axis in self.axes_manager._axes])
factors = np.array(self.data.shape) / new_shape_in_array
else:
if len(scale) != len(self.data.shape):
raise ValueError("Wrong scale size")
factors = np.array([scale[axis.index_in_axes_manager]
for axis in self.axes_manager._axes])
return factors # Factors are in array order
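# Worked example for the helper above (illustrative numbers): for data of
# shape (4, 4, 10) and new_shape=(2, 2, 5) the returned factors, in array
# order, are array([2., 2., 2.]), i.e. every axis is binned by a factor of 2.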
def rebin(self, new_shape=None, scale=None, crop=True, out=None):
"""
Rebin array.
Rebin the signal into a smaller or larger shape, based on linear
interpolation. Specify **either** new_shape or scale.
Parameters
----------
new_shape : a list of floats or integer, default None
For each dimension specify the new_shape. This will
then be converted into a scale.
scale : a list of floats or integer, default None
For each dimension specify the new:old pixel ratio, e.g. a ratio of 1
is no binning and a ratio of 2 means that each pixel in the new
spectrum is twice the size of the pixels in the old spectrum.
The length of the list should match the dimension of the numpy array.
***Note: only one of scale or new_shape should be specified,
otherwise a ValueError is raised.***
crop: bool, default True
When binning by a non-integer number of pixels it is likely that
the final row in each dimension contains less than the full quota to
fill one pixel.
e.g. a 5*5 array binned by 2.1 will produce two rows containing 2.1
pixels and one row containing only 0.8 pixels worth. Setting
crop=True or crop=False determines whether or not this
'black' line is cropped from the final binned array.
*Please note that if crop=False is used, the final row in each
dimension may appear black, if a fractional number of pixels are left
over. It can be removed but has been left to preserve total counts
before and after binning.*
%s
Returns
-------
s : Signal subclass
Examples
--------
>>> spectrum = hs.signals.EDSTEMSpectrum(np.ones([4, 4, 10]))
>>> spectrum.data[1, 2, 9] = 5
>>> print(spectrum)
<EDSTEMSpectrum, title: , dimensions: (4, 4|10)>
>>> print ('Sum = ', sum(sum(sum(spectrum.data))))
Sum = 164.0
>>> scale = [2, 2, 5]
>>> test = spectrum.rebin(scale)
>>> print(test)
<EDSTEMSpectrum, title: , dimensions: (2, 2|2)>
>>> print('Sum = ', sum(sum(sum(test.data))))
Sum = 164.0
"""
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale,)
s = out or self._deepcopy_with_new_data(None, copy_variance=True)
data = hyperspy.misc.array_tools.rebin(
self.data, scale=factors, crop=crop)
if out:
if out._lazy:
out.data = data
else:
out.data[:] = data
else:
s.data = data
s.get_dimensions_from_data()
for axis, axis_src in zip(s.axes_manager._axes,
self.axes_manager._axes):
axis.scale = axis_src.scale * factors[axis.index_in_array]
if s.metadata.has_item('Signal.Noise_properties.variance'):
if isinstance(s.metadata.Signal.Noise_properties.variance,
BaseSignal):
var = s.metadata.Signal.Noise_properties.variance
s.metadata.Signal.Noise_properties.variance = var.rebin(
new_shape=new_shape, scale=scale, crop=crop, out=out)
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
rebin.__doc__ %= (OUT_ARG)
def split(self,
axis='auto',
number_of_parts='auto',
step_sizes='auto'):
"""Splits the data into several signals.
The split can be defined by giving the number_of_parts, a homogeneous
step size or a list of customized step sizes. By default ('auto'),
the function is the reverse of utils.stack().
Parameters
----------
axis : {'auto' | int | string}
Specify the data axis in which to perform the splitting
operation. The axis can be specified using the index of the
axis in `axes_manager` or the axis name.
- If 'auto' and if the object has been created with utils.stack,
split will return the former list of signals (options stored in
'metadata._HyperSpy.Stacking_history'), otherwise the last
navigation axis will be used.
number_of_parts : {'auto' | int}
Number of parts in which the SI will be split. The
splitting is homogeneous. When the axis size is not divisible
by the number_of_parts, the remainder data is lost without
warning. If number_of_parts and step_sizes is 'auto',
number_of_parts equals the length of the axis,
step_sizes equals one and the axis is suppressed from each
sub_spectra.
step_sizes : {'auto' | list of ints | int}
Size of the split parts. If 'auto', the step_sizes equals one.
If int, the splitting is homogeneous.
Examples
--------
>>> s = hs.signals.Signal1D(random.random([4,3,2]))
>>> s
<Signal1D, title: , dimensions: (3, 4|2)>
>>> s.split()
[<Signal1D, title: , dimensions: (3 |2)>,
<Signal1D, title: , dimensions: (3 |2)>,
<Signal1D, title: , dimensions: (3 |2)>,
<Signal1D, title: , dimensions: (3 |2)>]
>>> s.split(step_sizes=2)
[<Signal1D, title: , dimensions: (3, 2|2)>,
<Signal1D, title: , dimensions: (3, 2|2)>]
>>> s.split(step_sizes=[1,2])
[<Signal1D, title: , dimensions: (3, 1|2)>,
<Signal1D, title: , dimensions: (3, 2|2)>]
Returns
-------
list of the split signals
"""
shape = self.data.shape
signal_dict = self._to_dictionary(add_learning_results=False)
if axis == 'auto':
mode = 'auto'
if hasattr(self.metadata._HyperSpy, 'Stacking_history'):
stack_history = self.metadata._HyperSpy.Stacking_history
axis_in_manager = stack_history.axis
step_sizes = stack_history.step_sizes
else:
axis_in_manager = self.axes_manager[-1 +
1j].index_in_axes_manager
else:
mode = 'manual'
axis_in_manager = self.axes_manager[axis].index_in_axes_manager
axis = self.axes_manager[axis_in_manager].index_in_array
len_axis = self.axes_manager[axis_in_manager].size
if number_of_parts == 'auto' and step_sizes == 'auto':
step_sizes = 1
number_of_parts = len_axis
elif number_of_parts != 'auto' and step_sizes != 'auto':
raise ValueError(
"You can define step_sizes or number_of_parts "
"but not both.")
elif step_sizes == 'auto':
if number_of_parts > shape[axis]:
raise ValueError(
"The number of parts is greater than "
"the axis size.")
else:
step_sizes = ([shape[axis] // number_of_parts, ] *
number_of_parts)
if isinstance(step_sizes, numbers.Integral):
step_sizes = [step_sizes] * int(len_axis / step_sizes)
splitted = []
cut_index = np.array([0] + step_sizes).cumsum()
axes_dict = signal_dict['axes']
for i in range(len(cut_index) - 1):
axes_dict[axis]['offset'] = self.axes_manager._axes[
axis].index2value(cut_index[i])
axes_dict[axis]['size'] = cut_index[i + 1] - cut_index[i]
data = self.data[
(slice(None), ) * axis +
(slice(cut_index[i], cut_index[i + 1]), Ellipsis)]
signal_dict['data'] = data
splitted += self.__class__(**signal_dict),
if number_of_parts == len_axis \
or step_sizes == [1] * len_axis:
for i, signal1D in enumerate(splitted):
signal1D.data = signal1D.data[
signal1D.axes_manager._get_data_slice([(axis, 0)])]
signal1D._remove_axis(axis_in_manager)
if mode == 'auto' and hasattr(
self.original_metadata, 'stack_elements'):
for i, spectrum in enumerate(splitted):
se = self.original_metadata.stack_elements['element' + str(i)]
spectrum.metadata = copy.deepcopy(
se['metadata'])
spectrum.original_metadata = copy.deepcopy(
se['original_metadata'])
spectrum.metadata.General.title = se.metadata.General.title
return splitted
def _unfold(self, steady_axes, unfolded_axis):
"""Modify the shape of the data by specifying the axes whose
dimension do not change and the axis over which the remaining axes will
be unfolded
Parameters
----------
steady_axes : list
The indices of the axes which dimensions do not change
unfolded_axis : int
The index of the axis over which all the rest of the axes (except
the steady axes) will be unfolded
See also
--------
fold
Notes
-----
WARNING: this private function does not modify the signal subclass
and it is intended for internal use only. To unfold use the public
`unfold`, `unfold_navigation_space` or `unfold_signal_space` instead.
It doesn't make sense unfolding when dim < 2
"""
if self.data.squeeze().ndim < 2:
return
# We need to store the original shape and coordinates to be used
# by
# the fold function only if it has not been already stored by a
# previous unfold
folding = self.metadata._HyperSpy.Folding
if folding.unfolded is False:
folding.original_shape = self.data.shape
folding.original_axes_manager = self.axes_manager
folding.unfolded = True
new_shape = [1] * len(self.data.shape)
for index in steady_axes:
new_shape[index] = self.data.shape[index]
new_shape[unfolded_axis] = -1
self.data = self.data.reshape(new_shape)
self.axes_manager = self.axes_manager.deepcopy()
uname = ''
uunits = ''
to_remove = []
for axis, dim in zip(self.axes_manager._axes, new_shape):
if dim == 1:
uname += ',' + str(axis)
uunits += ',' + str(axis.units)
to_remove.append(axis)
ua = self.axes_manager._axes[unfolded_axis]
ua.name = str(ua) + uname
ua.units = str(ua.units) + uunits
ua.size = self.data.shape[unfolded_axis]
for axis in to_remove:
self.axes_manager.remove(axis.index_in_axes_manager)
self.data = self.data.squeeze()
self._assign_subclass()
def unfold(self, unfold_navigation=True, unfold_signal=True):
"""Modifies the shape of the data by unfolding the signal and
navigation dimensions separately
Returns
-------
needed_unfolding : bool
"""
unfolded = False
if unfold_navigation:
if self.unfold_navigation_space():
unfolded = True
if unfold_signal:
if self.unfold_signal_space():
unfolded = True
return unfolded
@contextmanager
def unfolded(self, unfold_navigation=True, unfold_signal=True):
"""Use this function together with a `with` statement to have the
signal be unfolded for the scope of the `with` block, before
automatically refolding when passing out of scope.
See also
--------
unfold, fold
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> with s.unfolded():
# Do whatever needs doing while unfolded here
pass
"""
unfolded = self.unfold(unfold_navigation, unfold_signal)
try:
yield unfolded
finally:
if unfolded is not False:
self.fold()
def unfold_navigation_space(self):
"""Modify the shape of the data to obtain a navigation space of
dimension 1
Returns
-------
needed_unfolding : bool
"""
if self.axes_manager.navigation_dimension < 2:
needed_unfolding = False
else:
needed_unfolding = True
steady_axes = [
axis.index_in_array for axis in
self.axes_manager.signal_axes]
unfolded_axis = (
self.axes_manager.navigation_axes[0].index_in_array)
self._unfold(steady_axes, unfolded_axis)
if self.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
variance.unfold_navigation_space()
return needed_unfolding
def unfold_signal_space(self):
"""Modify the shape of the data to obtain a signal space of
dimension 1
Returns
-------
needed_unfolding : bool
"""
if self.axes_manager.signal_dimension < 2:
needed_unfolding = False
else:
needed_unfolding = True
steady_axes = [
axis.index_in_array for axis in
self.axes_manager.navigation_axes]
unfolded_axis = self.axes_manager.signal_axes[0].index_in_array
self._unfold(steady_axes, unfolded_axis)
self.metadata._HyperSpy.Folding.signal_unfolded = True
if self.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
variance.unfold_signal_space()
return needed_unfolding
def fold(self):
"""If the signal was previously unfolded, folds it back"""
folding = self.metadata._HyperSpy.Folding
# Note that == must be used instead of is True because
# if the value was loaded from a file its type can be np.bool_
if folding.unfolded == True:
self.data = self.data.reshape(folding.original_shape)
self.axes_manager = folding.original_axes_manager
folding.original_shape = None
folding.original_axes_manager = None
folding.unfolded = False
folding.signal_unfolded = False
self._assign_subclass()
if self.metadata.has_item('Signal.Noise_properties.variance'):
variance = self.metadata.Signal.Noise_properties.variance
if isinstance(variance, BaseSignal):
variance.fold()
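# Illustrative round trip for unfold/fold (assumes `hs` and `np` as above):
# unfolding flattens the navigation space into a single axis and fold()
# restores the shape recorded in metadata._HyperSpy.Folding.
# >>> s = hs.signals.Signal1D(np.ones((8, 8, 100)))
# >>> s.unfold_navigation_space()
# True
# >>> s.data.shape
# (64, 100)
# >>> s.fold()
# >>> s.data.shape
# (8, 8, 100)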
def _make_sure_data_is_contiguous(self, log=False):
if self.data.flags['C_CONTIGUOUS'] is False:
if log:
_warn_string = "{0!r} data is replaced by its optimized copy".format(
self)
_logger.warning(_warn_string)
self.data = np.ascontiguousarray(self.data)
def _iterate_signal(self):
"""Iterates over the signal data.
It is faster than using the signal iterator.
"""
if self.axes_manager.navigation_size < 2:
yield self()
return
self._make_sure_data_is_contiguous()
axes = [axis.index_in_array for
axis in self.axes_manager.signal_axes]
if axes:
unfolded_axis = (
self.axes_manager.navigation_axes[0].index_in_array)
new_shape = [1] * len(self.data.shape)
for axis in axes:
new_shape[axis] = self.data.shape[axis]
new_shape[unfolded_axis] = -1
else: # signal_dimension == 0
new_shape = (-1, 1)
axes = [1]
unfolded_axis = 0
# Warning! If the data is not contiguous it will make a copy!
data = self.data.reshape(new_shape)
getitem = [0] * len(data.shape)
for axis in axes:
getitem[axis] = slice(None)
for i in range(data.shape[unfolded_axis]):
getitem[unfolded_axis] = i
yield(data[tuple(getitem)])
def _remove_axis(self, axes):
am = self.axes_manager
axes = am[axes]
if not np.iterable(axes):
axes = (axes,)
if am.navigation_dimension + am.signal_dimension > len(axes):
old_signal_dimension = am.signal_dimension
am.remove(axes)
if old_signal_dimension != am.signal_dimension:
self._assign_subclass()
else:
# Create a "Scalar" axis because the axis is the last one left and
# HyperSpy does not support 0 dimensions
from hyperspy.misc.utils import add_scalar_axis
add_scalar_axis(self)
def _ma_workaround(self, s, function, axes, ar_axes, out):
# TODO: Remove if and when numpy.ma accepts tuple `axis`
# Basically perform unfolding, but only on data. We don't care about
# the axes since the function will consume it/them.
if not np.iterable(ar_axes):
ar_axes = (ar_axes,)
ar_axes = sorted(ar_axes)
new_shape = list(self.data.shape)
for index in ar_axes[1:]:
new_shape[index] = 1
new_shape[ar_axes[0]] = -1
data = self.data.reshape(new_shape).squeeze()
if out:
data = np.atleast_1d(function(data, axis=ar_axes[0],))
if data.shape == out.data.shape:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (data.shape, out.data.shape))
else:
s.data = function(data, axis=ar_axes[0],)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None):
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes,)
# Use out argument in numpy function when available for operations that
# do not return scalars in numpy.
np_out = not len(self.axes_manager._axes) == len(axes)
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
s = out or self._deepcopy_with_new_data(None)
if np.ma.is_masked(self.data):
return self._ma_workaround(s=s, function=function, axes=axes,
ar_axes=ar_axes, out=out)
if out:
if np_out:
function(self.data, axis=ar_axes, out=out.data,)
else:
data = np.atleast_1d(function(self.data, axis=ar_axes,))
if data.shape == out.data.shape:
out.data[:] = data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (data.shape, out.data.shape))
out.events.data_changed.trigger(obj=out)
else:
s.data = np.atleast_1d(
function(self.data, axis=ar_axes,))
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def sum(self, axis=None, out=None):
"""Sum the data over the given axes.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, mean, std, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.sum(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.sum, axis,
out=out)
sum.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG)
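# Illustrative sketch of the `out` argument shared by the reduction methods
# below (assumes `hs` and `np` as above): a pre-allocated result signal can
# be reused to avoid repeated allocations.
# >>> s = hs.signals.BaseSignal(np.random.random((64, 64, 1024)))
# >>> result = s.sum(-1)        # first call allocates the result signal
# >>> s.sum(-1, out=result)     # later calls write into the same buffer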
def max(self, axis=None, out=None):
"""Returns a signal with the maximum of the signal along at least one
axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
min, sum, mean, std, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.max(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.max, axis,
out=out)
max.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG)
def min(self, axis=None, out=None):
"""Returns a signal with the minimum of the signal along at least one
axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, sum, mean, std, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.min(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.min, axis,
out=out)
min.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG)
def mean(self, axis=None, out=None):
"""Returns a signal with the average of the signal along at least one
axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, sum, std, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.mean(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.mean, axis,
out=out)
mean.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG)
def std(self, axis=None, out=None):
"""Returns a signal with the standard deviation of the signal along
at least one axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, sum, mean, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.std(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.std, axis,
out=out)
std.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG)
def var(self, axis=None, out=None):
"""Returns a signal with the variances of the signal along at least one
axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, sum, mean, std, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.var(-1).data.shape
(64,64)
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.var, axis,
out=out)
var.__doc__ %= (MANY_AXIS_PARAMETER, OUT_ARG)
def nansum(self, axis=None, out=None):
"""%s
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.nansum, axis,
out=out)
nansum.__doc__ %= (NAN_FUNC.format('sum', sum.__doc__))
def nanmax(self, axis=None, out=None):
"""%s
"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.nanmax, axis,
out=out)
nanmax.__doc__ %= (NAN_FUNC.format('max', max.__doc__))
def nanmin(self, axis=None, out=None):
"""%s"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.nanmin, axis,
out=out)
nanmin.__doc__ %= (NAN_FUNC.format('min', min.__doc__))
def nanmean(self, axis=None, out=None):
"""%s """
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.nanmean, axis,
out=out)
nanmean.__doc__ %= (NAN_FUNC.format('mean', mean.__doc__))
def nanstd(self, axis=None, out=None):
"""%s"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.nanstd, axis,
out=out)
nanstd.__doc__ %= (NAN_FUNC.format('std', std.__doc__))
def nanvar(self, axis=None, out=None):
"""%s"""
if axis is None:
axis = self.axes_manager.navigation_axes
return self._apply_function_on_data_and_remove_axis(np.nanvar, axis,
out=out)
nanvar.__doc__ %= (NAN_FUNC.format('var', var.__doc__))
def diff(self, axis, order=1, out=None):
"""Returns a signal with the n-th order discrete difference along
given axis.
Parameters
----------
axis %s
order : int
the order of the derivative
%s
See also
--------
max, min, sum, mean, std, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.diff(-1).data.shape
(64,64,1023)
"""
s = out or self._deepcopy_with_new_data(None)
data = np.diff(self.data, n=order,
axis=self.axes_manager[axis].index_in_array)
if out is not None:
out.data[:] = data
else:
s.data = data
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def derivative(self, axis, order=1, out=None):
"""Numerical derivative along the given axis.
Currently only the first order finite difference method is implemented.
Parameters
----------
axis %s
order: int
The order of the derivative. (Note that this is the order of the
derivative i.e. `order=2` does not use second order finite
differences method.)
%s
Returns
-------
der : Signal
Note that the size of the data on the given `axis` decreases by the
given `order`, i.e. if `axis` is "x", `order` is 2 and the
x dimension is N, der's x dimension is N - 2.
See also
--------
diff
"""
der = self.diff(order=order, axis=axis, out=out)
der = out or der
axis = self.axes_manager[axis]
der.data /= axis.scale ** order
if out is None:
return der
else:
out.events.data_changed.trigger(obj=out)
derivative.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
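# Illustrative sketch of derivative() (assumes `hs` and `np` as above):
# the result is diff() divided by axis.scale**order, so a linear ramp
# sampled with scale 0.5 has a constant first derivative of 1.
# >>> s = hs.signals.Signal1D(np.arange(0., 5., 0.5))
# >>> s.axes_manager[0].scale = 0.5
# >>> s.derivative(-1).data
# array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.])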
def integrate_simpson(self, axis, out=None):
"""Returns a signal with the result of calculating the integral
of the signal along an axis using Simpson's rule.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, sum, mean, std, var, indexmax, valuemax, amax
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.integrate_simpson(-1).data.shape
(64,64)
"""
axis = self.axes_manager[axis]
s = out or self._deepcopy_with_new_data(None)
data = sp.integrate.simps(y=self.data, x=axis.axis,
axis=axis.index_in_array)
if out is not None:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
else:
s.data = data
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def integrate1D(self, axis, out=None):
"""Integrate the signal over the given axis.
The integration is performed using Simpson's rule if
`metadata.Signal.binned` is False and summation over the given axis if
True.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
integrate_simpson, diff, derivative
Examples
--------
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.integrate1D(-1).data.shape
(64,64)
"""
if self.metadata.Signal.binned is False:
return self.integrate_simpson(axis=axis, out=out)
else:
return self.sum(axis=axis, out=out)
integrate1D.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def indexmin(self, axis, out=None):
"""Returns a signal with the index of the minimum along an axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
The data dtype is always int.
See also
--------
max, min, sum, mean, std, var, valuemax, amax
Usage
-----
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.indexmin(-1).data.shape
(64,64)
"""
return self._apply_function_on_data_and_remove_axis(np.argmin, axis,
out=out)
indexmin.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def indexmax(self, axis, out=None):
"""Returns a signal with the index of the maximum along an axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
The data dtype is always int.
See also
--------
max, min, sum, mean, std, var, valuemax, amax
Usage
-----
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.indexmax(-1).data.shape
(64,64)
"""
return self._apply_function_on_data_and_remove_axis(np.argmax, axis,
out=out)
indexmax.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def valuemax(self, axis, out=None):
"""Returns a signal with the value of coordinates of the maximum along an axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, sum, mean, std, var, indexmax, amax
Usage
-----
>>> import numpy as np
>>> s = BaseSignal(np.random.random((64,64,1024)))
>>> s.data.shape
(64,64,1024)
>>> s.valuemax(-1).data.shape
(64,64)
"""
idx = self.indexmax(axis)
data = self.axes_manager[axis].index2value(idx.data)
if out is None:
idx.data = data
return idx
else:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
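# Illustrative sketch of valuemax() (assumes `hs` and `np` as above): the
# argmax index is converted to the calibrated axis value via index2value.
# >>> s = hs.signals.Signal1D(np.array([0., 1., 5., 2.]))
# >>> s.axes_manager[0].offset = 100
# >>> s.axes_manager[0].scale = 2
# >>> s.valuemax(0).data        # argmax index is 2 -> 100 + 2 * 2
# array([ 104.])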
def valuemin(self, axis, out=None):
"""Returns a signal with the value of coordinates of the minimum along an axis.
Parameters
----------
axis %s
%s
Returns
-------
s : Signal
See also
--------
max, min, sum, mean, std, var, indexmax, amax
"""
idx = self.indexmin(axis)
data = self.axes_manager[axis].index2value(idx.data)
if out is None:
idx.data = data
return idx
else:
out.data[:] = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def get_histogram(self, bins='freedman', range_bins=None, out=None,
**kwargs):
"""Return a histogram of the signal data.
More sophisticated algorithms for determining bins can be used.
Aside from the `bins` argument, which can be a string specifying how
the bins are computed, the parameters are the same as numpy.histogram().
Parameters
----------
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
'freedman' : use the Freedman-Diaconis rule to determine bins
'blocks' : use bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
%s
**kwargs
other keyword arguments (weight and density) are described in
np.histogram().
Returns
-------
hist_spec : A 1D spectrum instance containing the histogram.
See Also
--------
print_summary_statistics
astroML.density_estimation.histogram, numpy.histogram : these are the
functions that hyperspy uses to compute the histogram.
Notes
-----
The lazy version of the algorithm does not support 'knuth' and 'blocks'
bins arguments.
The number of bins estimators are taken from AstroML. Read
their documentation for more info.
Examples
--------
>>> s = hs.signals.Signal1D(np.random.normal(size=(10, 100)))
Plot the data histogram
>>> s.get_histogram().plot()
Plot the histogram of the signal at the current coordinates
>>> s.get_current_signal().get_histogram().plot()
"""
from hyperspy import signals
data = self.data[~np.isnan(self.data)].flatten()
hist, bin_edges = histogram(data,
bins=bins,
range=range_bins,
**kwargs)
if out is None:
hist_spec = signals.Signal1D(hist)
else:
hist_spec = out
if hist_spec.data.shape == hist.shape:
hist_spec.data[:] = hist
else:
hist_spec.data = hist
if bins == 'blocks':
hist_spec.axes_manager.signal_axes[0].axis = bin_edges[:-1]
warnings.warn(
"The options `bins = 'blocks'` is not fully supported in this "
"versions of hyperspy. It should be used for plotting purpose"
"only.")
else:
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.metadata.General.title = (self.metadata.General.title +
" histogram")
hist_spec.metadata.Signal.binned = True
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ %= OUT_ARG
def map(self, function,
show_progressbar=None,
parallel=None, inplace=True, ragged=None,
**kwargs):
"""Apply a function to the signal data at all the coordinates.
The function must operate on numpy arrays. It is applied to the data at
each navigation coordinate pixel-by-pixel. Any extra keyword argument
is passed to the function. The keywords can take different values at
different coordinates. If the function takes an `axis` or `axes`
argument, the function is assumed to be vectorial and the signal axes
are assigned to `axis` or `axes`. Otherwise, the signal is iterated
over the navigation axes and a progress bar is displayed to monitor the
progress.
In general, only the navigation axes (order, calibration and number) are
guaranteed to be preserved.
Parameters
----------
function : function
A function that can be applied to the signal.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
parallel : {None,bool,int}
if True, the mapping will be performed in a threaded (parallel)
manner.
inplace : bool
if True (default), the data is replaced by the result. Otherwise a
new signal with the results is returned.
ragged : {None, bool}
Indicates if results for each navigation pixel are of identical
shape (and/or numpy arrays to begin with). If None, appropriate
choice is made while processing. None is not allowed for Lazy
signals!
keyword arguments : any valid keyword argument
All extra keyword arguments are passed to the function.
Notes
-----
If the function results do not have identical shapes, the result is an
array of navigation shape, where each element corresponds to the result
of the function (of arbitrary object type), called a "ragged array". As
such, most functions are not able to operate on the result and the data
should be used directly.
This method is similar to Python's :func:`map`, which can also be
utilized with a :class:`Signal` instance for similar purposes. However, this
method has the advantage of being faster because it iterates the numpy
array instead of the :class:`Signal`.
Examples
--------
Apply a gaussian filter to all the images in the dataset. The sigma
parameter is constant.
>>> import scipy.ndimage
>>> im = hs.signals.Signal2D(np.random.random((10, 64, 64)))
>>> im.map(scipy.ndimage.gaussian_filter, sigma=2.5)
Apply a gaussian filter to all the images in the dataset. The sigma
parameter is variable.
>>> im = hs.signals.Signal2D(np.random.random((10, 64, 64)))
>>> sigmas = hs.signals.BaseSignal(np.linspace(2,5,10)).T
>>> im.map(scipy.ndimage.gaussian_filter, sigma=sigmas)
"""
# Separate out the keyword arguments that are BaseSignal instances (ndkwargs)
ndkwargs = ()
for key, value in kwargs.items():
if isinstance(value, BaseSignal):
ndkwargs += ((key, value),)
# Check if the signal axes have inhomogenous scales and/or units and
# display in warning if yes.
scale = set()
units = set()
for i in range(len(self.axes_manager.signal_axes)):
scale.add(self.axes_manager.signal_axes[i].scale)
units.add(self.axes_manager.signal_axes[i].units)
if len(units) != 1 or len(scale) != 1:
_logger.warning(
"The function you applied does not take into "
"account the difference of units and of scales in-between"
" axes.")
# If the function has an axis argument and the signal dimension is 1,
# we suppose that it can operate on the full array and we don't
# iterate over the coordinates.
try:
fargs = inspect.signature(function).parameters.keys()
except TypeError:
# This is probably a Cython function that is not supported by
# inspect.
fargs = []
if not ndkwargs and (self.axes_manager.signal_dimension == 1 and
"axis" in fargs):
kwargs['axis'] = self.axes_manager.signal_axes[-1].index_in_array
res = self._map_all(function, inplace=inplace, **kwargs)
# If the function has an axes argument
# we suppose that it can operate on the full array and we don't
# iterate over the coordinates.
elif not ndkwargs and "axes" in fargs and not parallel:
kwargs['axes'] = tuple([axis.index_in_array for axis in
self.axes_manager.signal_axes])
res = self._map_all(function, inplace=inplace, **kwargs)
else:
# Iteration over coordinates.
res = self._map_iterate(function, iterating_kwargs=ndkwargs,
show_progressbar=show_progressbar,
parallel=parallel, inplace=inplace,
ragged=ragged,
**kwargs)
if inplace:
self.events.data_changed.trigger(obj=self)
return res
def _map_all(self, function, inplace=True, **kwargs):
"""The function has to have either 'axis' or 'axes' keyword argument,
and hence support operating on the full dataset efficiently.
Replaced for lazy signals"""
newdata = function(self.data, **kwargs)
if inplace:
self.data = newdata
return None
return self._deepcopy_with_new_data(newdata)
def _map_iterate(self, function, iterating_kwargs=(),
show_progressbar=None, parallel=None,
ragged=None,
inplace=True, **kwargs):
"""Iterates the signal navigation space applying the function.
Parameters
----------
function : callable
the function to apply
iterating_kwargs : tuple of tuples
a tuple with structure (('key1', value1), ('key2', value2), ..)
where the key-value pairs will be passed as kwargs for the
callable, and the values will be iterated together with the signal
navigation.
parallel : {None, bool}
if True, the mapping will be performed in a threaded (parallel)
manner. If None the default from `preferences` is used.
inplace : bool
if True (default), the data is replaced by the result. Otherwise a
new signal with the results is returned.
ragged : {None, bool}
Indicates if results for each navigation pixel are of identical
shape (and/or numpy arrays to begin with). If None, appropriate
choice is made while processing. None is not allowed for Lazy
signals!
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
**kwargs
passed to the function as constant kwargs
Notes
-----
This method is replaced for lazy signals.
Examples
--------
Pass a larger array of different shape
>>> s = hs.signals.Signal1D(np.arange(20.).reshape((20,1)))
>>> def func(data, value=0):
... return data + value
>>> # pay attention that it's a tuple of tuples - need commas
>>> s._map_iterate(func,
... iterating_kwargs=(('value',
... np.random.rand(5,400).flat),))
>>> s.data.T
array([[ 0.82869603, 1.04961735, 2.21513949, 3.61329091,
4.2481755 , 5.81184375, 6.47696867, 7.07682618,
8.16850697, 9.37771809, 10.42794054, 11.24362699,
12.11434077, 13.98654036, 14.72864184, 15.30855499,
16.96854373, 17.65077064, 18.64925703, 19.16901297]])
Storing function result to other signal (e.g. calculated shifts)
>>> s = hs.signals.Signal1D(np.arange(20.).reshape((5,4)))
>>> def func(data): # the original function
... return data.sum()
>>> result = s._get_navigation_signal().T
>>> def wrapped(*args, data=None):
... return func(data)
>>> result._map_iterate(wrapped,
... iterating_kwargs=(('data', s),))
>>> result.data
array([ 6., 22., 38., 54., 70.])
"""
if parallel is None:
parallel = preferences.General.parallel
if parallel is True:
from os import cpu_count
parallel = cpu_count() or 1
# Because by default it's assumed to be I/O bound, and cpu_count*5 is
# used. For us this is not the case.
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
size = max(1, self.axes_manager.navigation_size)
from hyperspy.misc.utils import (create_map_objects,
map_result_construction)
func, iterators = create_map_objects(function, size, iterating_kwargs,
**kwargs)
iterators = (self._iterate_signal(),) + iterators
res_shape = self.axes_manager._navigation_shape_in_array
# no navigation
if not len(res_shape):
res_shape = (1,)
# pre-allocate some space
res_data = np.empty(res_shape, dtype='O')
shapes = set()
# parallel or sequential maps
if parallel:
from concurrent.futures import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=parallel)
thismap = executor.map
else:
from builtins import map as thismap
pbar = progressbar(total=size, leave=True, disable=not
show_progressbar)
for ind, res in zip(range(res_data.size),
thismap(func, zip(*iterators))):
res_data.flat[ind] = res
if ragged is False:
# to be able to break quickly and not waste time / resources
shapes.add(res.shape)
if len(shapes) != 1:
raise ValueError('The result shapes are not identical, but '
'ragged=False')
else:
try:
shapes.add(res.shape)
except AttributeError:
shapes.add(None)
pbar.update(1)
if parallel:
executor.shutdown()
# Combine data if required
shapes = list(shapes)
suitable_shapes = len(shapes) == 1 and shapes[0] is not None
ragged = ragged or not suitable_shapes
sig_shape = None
if not ragged:
sig_shape = () if shapes[0] == (1,) else shapes[0]
res_data = np.stack(res_data.flat).reshape(
self.axes_manager._navigation_shape_in_array + sig_shape)
res = map_result_construction(self, inplace, res_data, ragged,
sig_shape)
return res
def copy(self):
try:
backup_plot = self._plot
self._plot = None
return copy.copy(self)
finally:
self._plot = backup_plot
def __deepcopy__(self, memo):
dc = type(self)(**self._to_dictionary())
if isinstance(dc.data, np.ndarray):
dc.data = dc.data.copy()
# uncomment if we want to deepcopy models as well:
# dc.models._add_dictionary(
# copy.deepcopy(
# self.models._models.as_dictionary()))
# The Signal subclasses might change the view on init
# The following code just copies the original view
for oaxis, caxis in zip(self.axes_manager._axes,
dc.axes_manager._axes):
caxis.navigate = oaxis.navigate
if dc.metadata.has_item('Markers'):
temp_marker_dict = dc.metadata.Markers.as_dictionary()
markers_dict = markers_metadata_dict_to_markers(
temp_marker_dict,
dc.axes_manager)
dc.metadata.Markers = markers_dict
return dc
def deepcopy(self):
return copy.deepcopy(self)
def change_dtype(self, dtype):
"""Change the data type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast. In addition to all
standard numpy dtypes HyperSpy supports four extra dtypes for RGB
images: "rgb8", "rgba8", "rgb16" and "rgba16". Changing from and to
any rgbx dtype is more constrained than most other dtype
conversions. To change to a rgbx dtype the signal dimension must be
1, its size 3(4) for rgb(rgba) dtypes, the dtype uint8(uint16) for
rgbx8(rgbx16) and the navigation dimension at least 2. After
conversion the signal dimension becomes 2. The dtype of images of
dtype rgbx8(rgbx16) can only be changed to uint8(uint16) and the
signal dimension becomes 1.
Examples
--------
>>> s = hs.signals.Signal1D([1,2,3,4,5])
>>> s.data
array([1, 2, 3, 4, 5])
>>> s.change_dtype('float')
>>> s.data
array([ 1., 2., 3., 4., 5.])
"""
if not isinstance(dtype, np.dtype):
if dtype in rgb_tools.rgb_dtypes:
if self.axes_manager.signal_dimension != 1:
raise AttributeError(
"Only 1D signals can be converted "
"to RGB images.")
if "8" in dtype and self.data.dtype.name != "uint8":
raise AttributeError(
"Only signals with dtype uint8 can be converted to "
"rgb8 images")
elif "16" in dtype and self.data.dtype.name != "uint16":
raise AttributeError(
"Only signals with dtype uint16 can be converted to "
"rgb16 images")
self.data = rgb_tools.regular_array2rgbx(self.data)
self.axes_manager.remove(-1)
self.axes_manager.set_signal_dimension(2)
self._assign_subclass()
return
else:
dtype = np.dtype(dtype)
if rgb_tools.is_rgbx(self.data) is True:
ddtype = self.data.dtype.fields["B"][0]
if ddtype != dtype:
raise ValueError(
"It is only possibile to change to %s." %
ddtype)
self.data = rgb_tools.rgbx2regular_array(self.data)
self.axes_manager._append_axis(
size=self.data.shape[-1],
scale=1,
offset=0,
name="RGB index",
navigate=False,)
self.axes_manager.set_signal_dimension(1)
self._assign_subclass()
return
else:
self.data = self.data.astype(dtype)
self._assign_subclass()
def estimate_poissonian_noise_variance(self,
expected_value=None,
gain_factor=None,
gain_offset=None,
correlation_factor=None):
"""Estimate the poissonian noise variance of the signal.
The variance is stored in the
``metadata.Signal.Noise_properties.variance`` attribute.
A poissonian noise variance is equal to the expected value. With the
default arguments, this method simply sets the variance attribute to
the given `expected_value`. However, more generally (although then
noise is not strictly poissonian), the variance may be proportional to
the expected value. Moreover, when the noise is a mixture of white
(gaussian) and poissonian noise, the variance is described by the
following linear model:
.. math::
\mathrm{Var}[X] = (a * \mathrm{E}[X] + b) * c
Where `a` is the `gain_factor`, `b` is the `gain_offset` (the gaussian
noise variance) and `c` the `correlation_factor`. The correlation
factor accounts for correlation of adjacent signal elements that can
be modeled as a convolution with a gaussian point spread function.
Parameters
----------
expected_value : None or Signal instance.
If None, the signal data is taken as the expected value. Note that
this may be inaccurate where `data` is small.
gain_factor, gain_offset, correlation_factor: None or float.
All three must be positive. If None, take the values from
``metadata.Signal.Noise_properties.Variance_linear_model`` if
defined. Otherwise suppose poissonian noise i.e. ``gain_factor=1``,
``gain_offset=0``, ``correlation_factor=1``. If not None, the
values are stored in
``metadata.Signal.Noise_properties.Variance_linear_model``.
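        Examples
        --------
        A minimal, illustrative sketch (the numeric values are arbitrary):
        >>> s = hs.signals.Signal1D(np.arange(100., 110.))
        >>> s.estimate_poissonian_noise_variance(gain_factor=0.5,
        ...                                      gain_offset=10.,
        ...                                      correlation_factor=1.2)
        >>> s.metadata.Signal.Noise_properties.variance  # doctest: +SKIP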
"""
if expected_value is None:
expected_value = self
dc = expected_value.data if expected_value._lazy else expected_value.data.copy()
if self.metadata.has_item(
"Signal.Noise_properties.Variance_linear_model"):
vlm = self.metadata.Signal.Noise_properties.Variance_linear_model
else:
self.metadata.add_node(
"Signal.Noise_properties.Variance_linear_model")
vlm = self.metadata.Signal.Noise_properties.Variance_linear_model
if gain_factor is None:
if not vlm.has_item("gain_factor"):
vlm.gain_factor = 1
gain_factor = vlm.gain_factor
if gain_offset is None:
if not vlm.has_item("gain_offset"):
vlm.gain_offset = 0
gain_offset = vlm.gain_offset
if correlation_factor is None:
if not vlm.has_item("correlation_factor"):
vlm.correlation_factor = 1
correlation_factor = vlm.correlation_factor
if gain_offset < 0:
raise ValueError("`gain_offset` must be positive.")
if gain_factor < 0:
raise ValueError("`gain_factor` must be positive.")
if correlation_factor < 0:
raise ValueError("`correlation_factor` must be positive.")
variance = self._estimate_poissonian_noise_variance(dc, gain_factor,
gain_offset,
correlation_factor)
variance = BaseSignal(variance, attributes={'_lazy': self._lazy})
variance.axes_manager = self.axes_manager
variance.metadata.General.title = ("Variance of " +
self.metadata.General.title)
self.metadata.set_item(
"Signal.Noise_properties.variance", variance)
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
variance = np.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
def get_current_signal(self, auto_title=True, auto_filename=True):
"""Returns the data at the current coordinates as a Signal subclass.
        The signal subclass is the same as that of the current object. The
        `navigate` attribute of all axes is set to False.
Parameters
----------
auto_title : bool
            If True, a space followed by the current indices in parentheses
            is appended to the title.
auto_filename : bool
If True and `tmp_parameters.filename` is defined
            (which is always the case when the Signal has been read from a
            file), the filename is modified by appending an underscore and
            the current indices in parentheses.
Returns
-------
cs : Signal subclass instance.
Examples
--------
>>> im = hs.signals.Signal2D(np.zeros((2,3, 32,32)))
>>> im
<Signal2D, title: , dimensions: (3, 2, 32, 32)>
>>> im.axes_manager.indices = 2,1
>>> im.get_current_signal()
<Signal2D, title: (2, 1), dimensions: (32, 32)>
"""
cs = self.__class__(
self(),
axes=self.axes_manager._get_signal_axes_dicts(),
metadata=self.metadata.as_dictionary(),
attributes={'_lazy': False})
if auto_filename is True and self.tmp_parameters.has_item('filename'):
cs.tmp_parameters.filename = (self.tmp_parameters.filename +
'_' +
str(self.axes_manager.indices))
cs.tmp_parameters.extension = self.tmp_parameters.extension
cs.tmp_parameters.folder = self.tmp_parameters.folder
if auto_title is True:
cs.metadata.General.title = (cs.metadata.General.title +
' ' + str(self.axes_manager.indices))
cs.axes_manager._set_axis_attribute_values("navigate", False)
return cs
def _get_navigation_signal(self, data=None, dtype=None):
"""Return a signal with the same axes as the navigation space.
Parameters
----------
data : {None, numpy array}, optional
If None the `Signal` data is an array of the same dtype as the
current one filled with zeros. If a numpy array, the array must
have the correct dimensions.
dtype : data-type, optional
The desired data-type for the data array when `data` is None,
e.g., `numpy.int8`. Default is the data type of the current signal
data.
"""
from dask.array import Array
if data is not None:
ref_shape = (self.axes_manager._navigation_shape_in_array
if self.axes_manager.navigation_dimension != 0
else (1,))
if data.shape != ref_shape:
raise ValueError(
("data.shape %s is not equal to the current navigation "
"shape in array which is %s") %
(str(data.shape), str(ref_shape)))
else:
if dtype is None:
dtype = self.data.dtype
if self.axes_manager.navigation_dimension == 0:
data = np.array([0, ], dtype=dtype)
else:
data = np.zeros(
self.axes_manager._navigation_shape_in_array,
dtype=dtype)
if self.axes_manager.navigation_dimension == 0:
s = BaseSignal(data)
elif self.axes_manager.navigation_dimension == 1:
from hyperspy._signals.signal1d import Signal1D
s = Signal1D(data,
axes=self.axes_manager._get_navigation_axes_dicts())
elif self.axes_manager.navigation_dimension == 2:
from hyperspy._signals.signal2d import Signal2D
s = Signal2D(data,
axes=self.axes_manager._get_navigation_axes_dicts())
else:
s = BaseSignal(
data,
axes=self.axes_manager._get_navigation_axes_dicts())
s.axes_manager.set_signal_dimension(
self.axes_manager.navigation_dimension)
if isinstance(data, Array):
s = s.as_lazy()
return s
def _get_signal_signal(self, data=None, dtype=None):
"""Return a signal with the same axes as the signal space.
Parameters
----------
data : {None, numpy array}, optional
If None the `Signal` data is an array of the same dtype as the
current one filled with zeros. If a numpy array, the array must
have the correct dimensions.
dtype : data-type, optional
The desired data-type for the data array when `data` is None,
e.g., `numpy.int8`. Default is the data type of the current signal
data.
"""
from dask.array import Array
if data is not None:
ref_shape = (self.axes_manager._signal_shape_in_array
if self.axes_manager.signal_dimension != 0
else (1,))
if data.shape != ref_shape:
raise ValueError(
"data.shape %s is not equal to the current signal shape in"
" array which is %s" % (str(data.shape), str(ref_shape)))
else:
if dtype is None:
dtype = self.data.dtype
if self.axes_manager.signal_dimension == 0:
data = np.array([0, ], dtype=dtype)
else:
data = np.zeros(
self.axes_manager._signal_shape_in_array,
dtype=dtype)
if self.axes_manager.signal_dimension == 0:
s = BaseSignal(data)
s.set_signal_type(self.metadata.Signal.signal_type)
else:
s = self.__class__(data,
axes=self.axes_manager._get_signal_axes_dicts())
if isinstance(data, Array):
s = s.as_lazy()
return s
def __iter__(self):
# Reset AxesManager iteration index
self.axes_manager.__iter__()
return self
def __next__(self):
next(self.axes_manager)
return self.get_current_signal()
def __len__(self):
nitem = int(self.axes_manager.navigation_size)
nitem = nitem if nitem > 0 else 1
return nitem
def as_signal1D(self, spectral_axis, out=None):
"""Return the Signal as a spectrum.
The chosen spectral axis is moved to the last index in the
        array and the data is made contiguous for efficient
iteration over spectra.
Parameters
----------
spectral_axis %s
%s
See Also
--------
as_signal2D, transpose, hs.transpose
Examples
--------
>>> img = hs.signals.Signal2D(np.ones((3,4,5,6)))
>>> img
<Signal2D, title: , dimensions: (4, 3, 6, 5)>
        >>> img.as_signal1D(-1+1j)
<Signal1D, title: , dimensions: (6, 5, 4, 3)>
        >>> img.as_signal1D(0)
<Signal1D, title: , dimensions: (6, 5, 3, 4)>
"""
sp = self.transpose(signal_axes=[spectral_axis], optimize=True)
if out is None:
return sp
else:
if out._lazy:
out.data = sp.data
else:
out.data[:] = sp.data
out.events.data_changed.trigger(obj=out)
as_signal1D.__doc__ %= (ONE_AXIS_PARAMETER, OUT_ARG)
def as_signal2D(self, image_axes, out=None):
"""Convert signal to image.
The chosen image axes are moved to the last indices in the
        array and the data is made contiguous for efficient
iteration over images.
Parameters
----------
image_axes : tuple of {int | str | axis}
Select the image axes. Note that the order of the axes matters
and it is given in the "natural" i.e. X, Y, Z... order.
%s
Raises
------
DataDimensionError : when data.ndim < 2
See Also
--------
as_signal1D, transpose, hs.transpose
Examples
--------
>>> s = hs.signals.Signal1D(np.ones((2,3,4,5)))
>>> s
<Signal1D, title: , dimensions: (4, 3, 2, 5)>
>>> s.as_signal2D((0,1))
<Signal2D, title: , dimensions: (5, 2, 4, 3)>
        >>> s.as_signal2D((1,2))
<Signal2D, title: , dimensions: (4, 5, 3, 2)>
"""
if self.data.ndim < 2:
raise DataDimensionError(
"A Signal dimension must be >= 2 to be converted to a Signal2D")
im = self.transpose(signal_axes=image_axes, optimize=True)
if out is None:
return im
else:
if out._lazy:
out.data = im.data
else:
out.data[:] = im.data
out.events.data_changed.trigger(obj=out)
as_signal2D.__doc__ %= OUT_ARG
def _assign_subclass(self):
mp = self.metadata
self.__class__ = hyperspy.io.assign_signal_subclass(
dtype=self.data.dtype,
signal_dimension=self.axes_manager.signal_dimension,
signal_type=mp.Signal.signal_type
if "Signal.signal_type" in mp
else self._signal_type,
lazy=self._lazy)
if self._alias_signal_types: # In case legacy types exist:
mp.Signal.signal_type = self._signal_type # set to default!
self.__init__(**self._to_dictionary(add_models=True))
if self._lazy:
self._make_lazy()
def set_signal_type(self, signal_type):
"""Set the signal type and change the current class
accordingly if pertinent.
The signal_type attribute specifies the kind of data that the signal
        contains, e.g. "EELS" for electron energy-loss spectroscopy or
        "PES" for photoemission spectroscopy. There are some methods that are
        only available for certain kinds of signals, so setting this
parameter can enable/disable features.
Parameters
----------
signal_type : {"EELS", "EDS_TEM", "EDS_SEM", "DielectricFunction"}
Currently there are special features for "EELS" (electron
energy-loss spectroscopy), "EDS_TEM" (energy dispersive X-rays of
thin samples, normally obtained in a transmission electron
microscope), "EDS_SEM" (energy dispersive X-rays of thick samples,
            normally obtained in a scanning electron microscope) and
            "DielectricFunction". Setting the signal_type to the correct acronym
is highly advisable when analyzing any signal for which HyperSpy
provides extra features. Even if HyperSpy does not provide extra
features for the signal that you are analyzing, it is good practice
to set signal_type to a value that best describes the data signal
type.
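        Examples
        --------
        A minimal usage sketch:
        >>> s = hs.signals.Signal1D(np.ones(1024))
        >>> s.set_signal_type("EELS")  # doctest: +SKIP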
"""
self.metadata.Signal.signal_type = signal_type
self._assign_subclass()
def set_signal_origin(self, origin):
"""Set the `signal_origin` metadata value.
The signal_origin attribute specifies if the data was obtained
through experiment or simulation.
Parameters
----------
origin : string
Typically 'experiment' or 'simulation'.
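        Examples
        --------
        A minimal usage sketch:
        >>> s = hs.signals.Signal1D(np.zeros(10))
        >>> s.set_signal_origin('experiment')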
"""
self.metadata.Signal.signal_origin = origin
def print_summary_statistics(self, formatter="%.3f"):
"""Prints the five-number summary statistics of the data, the mean and
the standard deviation.
        Prints the mean, standard deviation (std), maximum (max), minimum
        (min), first quartile (Q1), median and third quartile (Q3). NaNs are
        removed from the calculations.
Parameters
----------
        formatter : str
Number formatter.
See Also
--------
get_histogram
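        Examples
        --------
        An illustrative sketch (the printed values depend on the data):
        >>> s = hs.signals.Signal1D(np.random.normal(size=1000))
        >>> s.print_summary_statistics()  # doctest: +SKIP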
"""
_mean, _std, _min, _q1, _q2, _q3, _max = self._calculate_summary_statistics()
print(underline("Summary statistics"))
print("mean:\t" + formatter % _mean)
print("std:\t" + formatter % _std)
print()
print("min:\t" + formatter % _min)
print("Q1:\t" + formatter % _q1)
print("median:\t" + formatter % _q2)
print("Q3:\t" + formatter % _q3)
print("max:\t" + formatter % _max)
def _calculate_summary_statistics(self):
data = self.data
data = data[~np.isnan(data)]
_mean = np.nanmean(data)
_std = np.nanstd(data)
_min = np.nanmin(data)
_q1 = np.percentile(data, 25)
_q2 = np.percentile(data, 50)
_q3 = np.percentile(data, 75)
_max = np.nanmax(data)
return _mean, _std, _min, _q1, _q2, _q3, _max
@property
def is_rgba(self):
return rgb_tools.is_rgba(self.data)
@property
def is_rgb(self):
return rgb_tools.is_rgb(self.data)
@property
def is_rgbx(self):
return rgb_tools.is_rgbx(self.data)
def add_marker(
self, marker, plot_on_signal=True, plot_marker=True,
permanent=False, plot_signal=True):
"""
Add a marker to the signal or navigator plot.
Plot the signal, if not yet plotted
Parameters
----------
marker : marker object or iterable of marker objects
The marker or iterable (list, tuple, ...) of markers to add.
See `plot.markers`. If you want to add a large number of markers,
add them as an iterable, since this will be much faster.
plot_on_signal : bool, default True
If True, add the marker to the signal
If False, add the marker to the navigator
plot_marker : bool, default True
If True, plot the marker.
permanent : bool, default False
If False, the marker will only appear in the current
plot. If True, the marker will be added to the
metadata.Markers list, and be plotted with plot(plot_markers=True).
If the signal is saved as a HyperSpy HDF5 file, the markers will be
stored in the HDF5 signal and be restored when the file is loaded.
Examples
--------
>>> import scipy.misc
>>> im = hs.signals.Signal2D(scipy.misc.ascent())
        >>> m = hs.markers.rectangle(x1=150, y1=100, x2=400,
        ...                          y2=400, color='red')
>>> im.add_marker(m)
Adding to a 1D signal, where the point will change
when the navigation index is changed
>>> s = hs.signals.Signal1D(np.random.random((3, 100)))
>>> marker = hs.markers.point((19, 10, 60), (0.2, 0.5, 0.9))
>>> s.add_marker(marker, permanent=True, plot_marker=True)
>>> s.plot(plot_markers=True) #doctest: +SKIP
Add permanent marker
>>> s = hs.signals.Signal2D(np.random.random((100, 100)))
>>> marker = hs.markers.point(50, 60)
>>> s.add_marker(marker, permanent=True, plot_marker=True)
>>> s.plot(plot_markers=True) #doctest: +SKIP
Add permanent marker which changes with navigation position, and
do not add it to a current plot
>>> s = hs.signals.Signal2D(np.random.randint(10, size=(3, 100, 100)))
>>> marker = hs.markers.point((10, 30, 50), (30, 50, 60), color='red')
>>> s.add_marker(marker, permanent=True, plot_marker=False)
>>> s.plot(plot_markers=True) #doctest: +SKIP
Removing a permanent marker
>>> s = hs.signals.Signal2D(np.random.randint(10, size=(100, 100)))
>>> marker = hs.markers.point(10, 60, color='red')
>>> marker.name = "point_marker"
>>> s.add_marker(marker, permanent=True)
>>> del s.metadata.Markers.point_marker
Adding many markers as a list
>>> from numpy.random import random
>>> s = hs.signals.Signal2D(np.random.randint(10, size=(100, 100)))
>>> marker_list = []
        >>> for i in range(100):
        ...     marker = hs.markers.point(random()*100, random()*100, color='red')
        ...     marker_list.append(marker)
>>> s.add_marker(marker_list, permanent=True)
"""
if isiterable(marker):
marker_list = marker
else:
marker_list = [marker]
markers_dict = {}
if permanent:
if not self.metadata.has_item('Markers'):
self.metadata.add_node('Markers')
marker_object_list = []
for marker_tuple in list(self.metadata.Markers):
marker_object_list.append(marker_tuple[1])
name_list = self.metadata.Markers.keys()
marker_name_suffix = 1
for m in marker_list:
marker_data_shape = m._get_data_shape()
if (not (len(marker_data_shape) == 0)) and (
marker_data_shape != self.axes_manager.navigation_shape):
raise ValueError(
"Navigation shape of the marker must be 0 or the "
"same navigation shape as this signal.")
if (m.signal is not None) and (m.signal is not self):
raise ValueError("Markers can not be added to several signals")
m._plot_on_signal = plot_on_signal
if plot_marker:
if self._plot is None:
self.plot()
if m._plot_on_signal:
self._plot.signal_plot.add_marker(m)
else:
if self._plot.navigator_plot is None:
self.plot()
self._plot.navigator_plot.add_marker(m)
m.plot(update_plot=False)
if permanent:
for marker_object in marker_object_list:
if m is marker_object:
raise ValueError("Marker already added to signal")
name = m.name
temp_name = name
while temp_name in name_list:
temp_name = name + str(marker_name_suffix)
marker_name_suffix += 1
m.name = temp_name
markers_dict[m.name] = m
m.signal = self
marker_object_list.append(m)
name_list.append(m.name)
if not plot_marker and not permanent:
_logger.warning(
"plot_marker=False and permanent=False does nothing")
if permanent:
self.metadata.Markers = markers_dict
if plot_marker:
if self._plot.signal_plot:
self._plot.signal_plot.ax.hspy_fig._draw_animated()
if self._plot.navigator_plot:
self._plot.navigator_plot.ax.hspy_fig._draw_animated()
def _plot_permanent_markers(self):
marker_name_list = self.metadata.Markers.keys()
markers_dict = self.metadata.Markers.__dict__
for marker_name in marker_name_list:
marker = markers_dict[marker_name]['_dtb_value_']
if marker.plot_marker:
if marker._plot_on_signal:
self._plot.signal_plot.add_marker(marker)
else:
self._plot.navigator_plot.add_marker(marker)
marker.plot(update_plot=False)
if self._plot.signal_plot:
self._plot.signal_plot.ax.hspy_fig._draw_animated()
if self._plot.navigator_plot:
self._plot.navigator_plot.ax.hspy_fig._draw_animated()
def add_poissonian_noise(self, **kwargs):
"""Add Poissonian noise to the data"""
original_type = self.data.dtype
self.data = np.random.poisson(self.data, **kwargs).astype(
original_type)
self.events.data_changed.trigger(obj=self)
def add_gaussian_noise(self, std):
"""Add Gaussian noise to the data
Parameters
----------
std : float
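        Examples
        --------
        A minimal, illustrative sketch (note that the noisy data is cast back
        to the original dtype):
        >>> s = hs.signals.Signal1D(np.zeros(100))
        >>> s.add_gaussian_noise(std=0.5)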
"""
noise = np.random.normal(0,
std,
self.data.shape)
original_dtype = self.data.dtype
self.data = (
self.data.astype(
noise.dtype) +
noise).astype(original_dtype)
self.events.data_changed.trigger(obj=self)
def transpose(self, signal_axes=None,
navigation_axes=None, optimize=False):
"""Transposes the signal to have the required signal and navigation
axes.
Parameters
----------
signal_axes, navigation_axes : {None, int, iterable}
With the exception of both parameters getting iterables, generally
one has to be None (i.e. "floating"). The other one specifies
either the required number or explicitly the axes to move to the
corresponding space.
If both are iterables, full control is given as long as all axes
are assigned to one space only.
optimize : bool [False]
If the data should be re-ordered in memory, most likely making a
copy. Ensures the fastest available iteration at the expense of
memory.
See also
--------
T, as_signal2D, as_signal1D, hs.transpose
Examples
--------
>>> # just create a signal with many distinct dimensions
>>> s = hs.signals.BaseSignal(np.random.rand(1,2,3,4,5,6,7,8,9))
>>> s
<BaseSignal, title: , dimensions: (|9, 8, 7, 6, 5, 4, 3, 2, 1)>
>>> s.transpose() # swap signal and navigation spaces
<BaseSignal, title: , dimensions: (9, 8, 7, 6, 5, 4, 3, 2, 1|)>
>>> s.T # a shortcut for no arguments
<BaseSignal, title: , dimensions: (9, 8, 7, 6, 5, 4, 3, 2, 1|)>
        >>> # roll to leave 5 axes in signal space
>>> s.transpose(signal_axes=5)
<BaseSignal, title: , dimensions: (4, 3, 2, 1|9, 8, 7, 6, 5)>
        >>> # roll to leave 3 axes in navigation space
>>> s.transpose(navigation_axes=3)
<BaseSignal, title: , dimensions: (3, 2, 1|9, 8, 7, 6, 5, 4)>
>>> # 3 explicitly defined axes in signal space
>>> s.transpose(signal_axes=[0, 2, 6])
<BaseSignal, title: , dimensions: (8, 6, 5, 4, 2, 1|9, 7, 3)>
>>> # A mix of two lists, but specifying all axes explicitly
>>> # The order of axes is preserved in both lists
>>> s.transpose(navigation_axes=[1, 2, 3, 4, 5, 8], signal_axes=[0, 6, 7])
<BaseSignal, title: , dimensions: (8, 7, 6, 5, 4, 1|9, 3, 2)>
"""
am = self.axes_manager
ns = self.axes_manager.navigation_axes + self.axes_manager.signal_axes
ax_list = am._axes
if isinstance(signal_axes, int):
if navigation_axes is not None:
raise ValueError("The navigation_axes are not None, even "
"though just a number was given for "
"signal_axes")
if len(ax_list) < signal_axes:
raise ValueError("Too many signal axes requested")
if signal_axes < 0:
raise ValueError("Can't have negative number of signal axes")
elif signal_axes == 0:
signal_axes = ()
navigation_axes = ax_list[::-1]
else:
navigation_axes = ax_list[:-signal_axes][::-1]
signal_axes = ax_list[-signal_axes:][::-1]
elif iterable_not_string(signal_axes):
signal_axes = tuple(am[ax] for ax in signal_axes)
if navigation_axes is None:
navigation_axes = tuple(ax for ax in ax_list
if ax not in signal_axes)[::-1]
elif iterable_not_string(navigation_axes):
# want to keep the order
navigation_axes = tuple(am[ax] for ax in navigation_axes)
intersection = set(signal_axes).intersection(navigation_axes)
if len(intersection):
raise ValueError("At least one axis found in both spaces:"
" {}".format(intersection))
if len(am._axes) != (len(signal_axes) + len(navigation_axes)):
raise ValueError("Not all current axes were assigned to a "
"space")
else:
raise ValueError("navigation_axes has to be None or an iterable"
" when signal_axes is iterable")
elif signal_axes is None:
if isinstance(navigation_axes, int):
if len(ax_list) < navigation_axes:
raise ValueError("Too many navigation axes requested")
if navigation_axes < 0:
raise ValueError(
"Can't have negative number of navigation axes")
elif navigation_axes == 0:
navigation_axes = ()
signal_axes = ax_list[::-1]
else:
signal_axes = ax_list[navigation_axes:][::-1]
navigation_axes = ax_list[:navigation_axes][::-1]
elif iterable_not_string(navigation_axes):
navigation_axes = tuple(am[ax] for ax in
navigation_axes)
signal_axes = tuple(ax for ax in ax_list
if ax not in navigation_axes)[::-1]
elif navigation_axes is None:
signal_axes = am.navigation_axes
navigation_axes = am.signal_axes
else:
raise ValueError(
"The passed navigation_axes argument is not valid")
else:
raise ValueError("The passed signal_axes argument is not valid")
# translate to axes idx from actual objects for variance
idx_sig = [ax.index_in_axes_manager for ax in signal_axes]
idx_nav = [ax.index_in_axes_manager for ax in navigation_axes]
# From now on we operate with axes in array order
signal_axes = signal_axes[::-1]
navigation_axes = navigation_axes[::-1]
# get data view
array_order = tuple(
ax.index_in_array for ax in navigation_axes)
array_order += tuple(ax.index_in_array for ax in signal_axes)
newdata = self.data.transpose(array_order)
res = self._deepcopy_with_new_data(newdata, copy_variance=True)
# reconfigure the axes of the axesmanager:
ram = res.axes_manager
ram._update_trait_handlers(remove=True)
# _axes are ordered in array order
ram._axes = [ram._axes[i] for i in array_order]
for i, ax in enumerate(ram._axes):
if i < len(navigation_axes):
ax.navigate = True
else:
ax.navigate = False
ram._update_attributes()
ram._update_trait_handlers(remove=False)
res._assign_subclass()
if res.metadata.has_item("Signal.Noise_properties.variance"):
var = res.metadata.Signal.Noise_properties.variance
if isinstance(var, BaseSignal):
var = var.transpose(signal_axes=idx_sig,
navigation_axes=idx_nav,
optimize=optimize)
res.metadata.set_item('Signal.Noise_properties.variance', var)
if optimize:
res._make_sure_data_is_contiguous(log=True)
if res.metadata.has_item('Markers'):
# The markers might fail if the navigation dimensions are changed
# so the safest is simply to not carry them over from the
# previous signal.
del res.metadata.Markers
return res
@property
def T(self):
"""The transpose of the signal, with signal and navigation spaces
swapped.
"""
return self.transpose()
ARITHMETIC_OPERATORS = (
"__add__",
"__sub__",
"__mul__",
"__floordiv__",
"__mod__",
"__divmod__",
"__pow__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__mod__",
"__truediv__",
)
INPLACE_OPERATORS = (
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ipow__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
)
COMPARISON_OPERATORS = (
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__ge__",
"__gt__",
)
UNARY_OPERATORS = (
"__neg__",
"__pos__",
"__abs__",
"__invert__",
)
for name in ARITHMETIC_OPERATORS + INPLACE_OPERATORS + COMPARISON_OPERATORS:
exec(
("def %s(self, other):\n" % name) +
(" return self._binary_operator_ruler(other, \'%s\')\n" %
name))
exec("%s.__doc__ = np.ndarray.%s.__doc__" % (name, name))
exec("setattr(BaseSignal, \'%s\', %s)" % (name, name))
# The following commented line enables the operators with swapped
# operands. They should be defined only for commutative operators
# but for simplicity we don't support this at all atm.
# exec("setattr(BaseSignal, \'%s\', %s)" % (name[:2] + "r" + name[2:],
# name))
# Implement unary arithmetic operations
for name in UNARY_OPERATORS:
exec(
("def %s(self):" % name) +
(" return self._unary_operator_ruler(\'%s\')" % name))
exec("%s.__doc__ = int.%s.__doc__" % (name, name))
exec("setattr(BaseSignal, \'%s\', %s)" % (name, name))
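# Illustrative usage sketch: once the two loops above have run, BaseSignal
# instances support numpy-style arithmetic, comparison and unary operators,
# e.g. (assuming s = hs.signals.BaseSignal(np.arange(10))):
#     t = -s + 3     # uses the generated __neg__ and __add__
#     mask = s > 4   # uses the generated __gt__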
|
CodeMonkeyJan/hyperspy
|
hyperspy/signal.py
|
Python
|
gpl-3.0
| 180,567
|
[
"Gaussian"
] |
9396948d60350cdbb390b5f5cad774a531050f2660d77ee8df02cf91a9220ecb
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack import xmlutil
from nova import test
class SelectorTest(test.TestCase):
obj_for_test = {
'test': {
'name': 'test',
'values': [1, 2, 3],
'attrs': {
'foo': 1,
'bar': 2,
'baz': 3,
},
},
}
def test_empty_selector(self):
sel = xmlutil.Selector()
self.assertEqual(len(sel.chain), 0)
self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
def test_dict_selector(self):
sel = xmlutil.Selector('test')
self.assertEqual(len(sel.chain), 1)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel(self.obj_for_test),
self.obj_for_test['test'])
def test_datum_selector(self):
sel = xmlutil.Selector('test', 'name')
self.assertEqual(len(sel.chain), 2)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'name')
self.assertEqual(sel(self.obj_for_test), 'test')
def test_list_selector(self):
sel = xmlutil.Selector('test', 'values', 0)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'values')
self.assertEqual(sel.chain[2], 0)
self.assertEqual(sel(self.obj_for_test), 1)
def test_items_selector(self):
sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[2], xmlutil.get_items)
for key, val in sel(self.obj_for_test):
self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
def test_missing_key_selector(self):
sel = xmlutil.Selector('test2', 'attrs')
self.assertEqual(sel(self.obj_for_test), None)
self.assertRaises(KeyError, sel, self.obj_for_test, True)
def test_constant_selector(self):
sel = xmlutil.ConstantSelector('Foobar')
self.assertEqual(sel.value, 'Foobar')
self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
def test_element_initial_attributes(self):
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
c=4, d=5, e=6)
# Verify all the attributes are as expected
expected = dict(a=1, b=2, c=4, d=5, e=6)
for k, v in expected.items():
self.assertEqual(elem.attrib[k].chain[0], v)
def test_element_get_attributes(self):
expected = dict(a=1, b=2, c=3)
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Verify that get() retrieves the attributes
for k, v in expected.items():
self.assertEqual(elem.get(k).chain[0], v)
def test_element_set_attributes(self):
attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
# Create a bare template element with no attributes
elem = xmlutil.TemplateElement('test')
# Set the attribute values
for k, v in attrs.items():
elem.set(k, v)
# Now verify what got set
self.assertEqual(len(elem.attrib['a'].chain), 1)
self.assertEqual(elem.attrib['a'].chain[0], 'a')
self.assertEqual(len(elem.attrib['b'].chain), 1)
self.assertEqual(elem.attrib['b'].chain[0], 'foo')
self.assertEqual(elem.attrib['c'], attrs['c'])
def test_element_attribute_keys(self):
attrs = dict(a=1, b=2, c=3, d=4)
expected = set(attrs.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=attrs)
# Now verify keys
self.assertEqual(set(elem.keys()), expected)
def test_element_attribute_items(self):
expected = dict(a=xmlutil.Selector(1),
b=xmlutil.Selector(2),
c=xmlutil.Selector(3))
keys = set(expected.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Now verify items
for k, v in elem.items():
self.assertEqual(expected[k], v)
keys.remove(k)
# Did we visit all keys?
self.assertEqual(len(keys), 0)
def test_element_selector_none(self):
# Create a template element with no selector
elem = xmlutil.TemplateElement('test')
self.assertEqual(len(elem.selector.chain), 0)
def test_element_selector_string(self):
# Create a template element with a string selector
elem = xmlutil.TemplateElement('test', selector='test')
self.assertEqual(len(elem.selector.chain), 1)
self.assertEqual(elem.selector.chain[0], 'test')
def test_element_selector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit selector
elem = xmlutil.TemplateElement('test', selector=sel)
self.assertEqual(elem.selector, sel)
def test_element_subselector_none(self):
# Create a template element with no subselector
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.subselector, None)
def test_element_subselector_string(self):
# Create a template element with a string subselector
elem = xmlutil.TemplateElement('test', subselector='test')
self.assertEqual(len(elem.subselector.chain), 1)
self.assertEqual(elem.subselector.chain[0], 'test')
def test_element_subselector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit subselector
elem = xmlutil.TemplateElement('test', subselector=sel)
self.assertEqual(elem.subselector, sel)
def test_element_append_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a child element
child = xmlutil.TemplateElement('child')
# Append the child to the parent
elem.append(child)
# Verify that the child was added
self.assertEqual(len(elem), 1)
self.assertEqual(elem[0], child)
self.assertEqual('child' in elem, True)
self.assertEqual(elem['child'], child)
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child')
self.assertRaises(KeyError, elem.append, child2)
def test_element_extend_children(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Verify that the children were added
self.assertEqual(len(elem), 3)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
self.assertEqual(children[idx].tag in elem, True)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
children2 = [
xmlutil.TemplateElement('child4'),
xmlutil.TemplateElement('child1'),
]
self.assertRaises(KeyError, elem.extend, children2)
# Also ensure that child4 was not added
self.assertEqual(len(elem), 3)
self.assertEqual(elem[-1].tag, 'child3')
def test_element_insert_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Create a child to insert
child = xmlutil.TemplateElement('child4')
# Insert it
elem.insert(1, child)
# Ensure the child was inserted in the right place
self.assertEqual(len(elem), 4)
children.insert(1, child)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
self.assertEqual(children[idx].tag in elem, True)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child2')
self.assertRaises(KeyError, elem.insert, 2, child2)
def test_element_remove_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Create a test child to remove
child = xmlutil.TemplateElement('child2')
# Try to remove it
self.assertRaises(ValueError, elem.remove, child)
# Ensure that no child was removed
self.assertEqual(len(elem), 3)
# Now remove a legitimate child
elem.remove(children[1])
# Ensure that the child was removed
self.assertEqual(len(elem), 2)
self.assertEqual(elem[0], children[0])
self.assertEqual(elem[1], children[2])
self.assertEqual('child2' in elem, False)
# Ensure the child cannot be retrieved by name
def get_key(elem, key):
return elem[key]
self.assertRaises(KeyError, get_key, elem, 'child2')
def test_element_text(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Ensure that it has no text
self.assertEqual(elem.text, None)
# Try setting it to a string and ensure it becomes a selector
elem.text = 'test'
self.assertEqual(hasattr(elem.text, 'chain'), True)
self.assertEqual(len(elem.text.chain), 1)
self.assertEqual(elem.text.chain[0], 'test')
# Try resetting the text to None
elem.text = None
self.assertEqual(elem.text, None)
# Now make up a selector and try setting the text to that
sel = xmlutil.Selector()
elem.text = sel
self.assertEqual(elem.text, sel)
# Finally, try deleting the text and see what happens
del elem.text
self.assertEqual(elem.text, None)
def test_apply_attrs(self):
# Create a template element
attrs = dict(attr1=xmlutil.ConstantSelector(1),
attr2=xmlutil.ConstantSelector(2))
tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
# Create an etree element
elem = etree.Element('test')
# Apply the template to the element
tmpl_elem.apply(elem, None)
# Now, verify the correct attributes were set
for k, v in elem.items():
self.assertEqual(str(attrs[k].value), v)
def test_apply_text(self):
# Create a template element
tmpl_elem = xmlutil.TemplateElement('test')
tmpl_elem.text = xmlutil.ConstantSelector(1)
# Create an etree element
elem = etree.Element('test')
# Apply the template to the element
tmpl_elem.apply(elem, None)
# Now, verify the text was set
self.assertEqual(str(tmpl_elem.text.value), elem.text)
def test__render(self):
attrs = dict(attr1=xmlutil.ConstantSelector(1),
attr2=xmlutil.ConstantSelector(2),
attr3=xmlutil.ConstantSelector(3))
# Create a master template element
master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template elements
slave_elems = [
xmlutil.TemplateElement('test', attr2=attrs['attr2']),
xmlutil.TemplateElement('test', attr3=attrs['attr3']),
]
# Try the render
elem = master_elem._render(None, None, slave_elems, None)
# Verify the particulars of the render
self.assertEqual(elem.tag, 'test')
self.assertEqual(len(elem.nsmap), 0)
for k, v in elem.items():
self.assertEqual(str(attrs[k].value), v)
# Create a parent for the element to be rendered
parent = etree.Element('parent')
# Try the render again...
elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
# Verify the particulars of the render
self.assertEqual(len(parent), 1)
self.assertEqual(parent[0], elem)
self.assertEqual(len(elem.nsmap), 1)
self.assertEqual(elem.nsmap['a'], 'foo')
def test_render(self):
# Create a template element
tmpl_elem = xmlutil.TemplateElement('test')
tmpl_elem.text = xmlutil.Selector()
# Create the object we're going to render
obj = ['elem1', 'elem2', 'elem3', 'elem4']
# Try a render with no object
elems = tmpl_elem.render(None, None)
self.assertEqual(len(elems), 0)
# Try a render with one object
elems = tmpl_elem.render(None, 'foo')
self.assertEqual(len(elems), 1)
self.assertEqual(elems[0][0].text, 'foo')
self.assertEqual(elems[0][1], 'foo')
# Now, try rendering an object with multiple entries
parent = etree.Element('parent')
elems = tmpl_elem.render(parent, obj)
self.assertEqual(len(elems), 4)
# Check the results
for idx in range(len(obj)):
self.assertEqual(elems[idx][0].text, obj[idx])
self.assertEqual(elems[idx][1], obj[idx])
def test_subelement(self):
# Try the SubTemplateElement constructor
parent = xmlutil.SubTemplateElement(None, 'parent')
self.assertEqual(parent.tag, 'parent')
self.assertEqual(len(parent), 0)
# Now try it with a parent element
child = xmlutil.SubTemplateElement(parent, 'child')
self.assertEqual(child.tag, 'child')
self.assertEqual(len(parent), 1)
self.assertEqual(parent[0], child)
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.unwrap(), elem)
self.assertEqual(elem.wrap().root, elem)
def test_dyntag(self):
obj = ['a', 'b', 'c']
# Create a template element with a dynamic tag
tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
# Try the render
parent = etree.Element('parent')
elems = tmpl_elem.render(parent, obj)
# Verify the particulars of the render
self.assertEqual(len(elems), len(obj))
for idx in range(len(obj)):
self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
self.assertEqual(tmpl.unwrap(), elem)
self.assertEqual(tmpl.wrap(), tmpl)
def test__siblings(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
# Check that we get the right siblings
siblings = tmpl._siblings()
self.assertEqual(len(siblings), 1)
self.assertEqual(siblings[0], elem)
def test__nsmap(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
# Check out that we get the right namespace dictionary
nsmap = tmpl._nsmap()
self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
self.assertEqual(len(nsmap), 1)
self.assertEqual(nsmap['a'], 'foo')
def test_master_attach(self):
# Set up a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1)
# Make sure it has a root but no slaves
self.assertEqual(tmpl.root, elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid slave
bad_elem = xmlutil.TemplateElement('test2')
self.assertRaises(ValueError, tmpl.attach, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid and a valid slave
good_elem = xmlutil.TemplateElement('test')
self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an inapplicable template
class InapplicableTemplate(xmlutil.Template):
def apply(self, master):
return False
inapp_tmpl = InapplicableTemplate(good_elem)
tmpl.attach(inapp_tmpl)
self.assertEqual(len(tmpl.slaves), 0)
# Now try attaching an applicable template
tmpl.attach(good_elem)
self.assertEqual(len(tmpl.slaves), 1)
self.assertEqual(tmpl.slaves[0].root, good_elem)
def test_master_copy(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
# Give it a slave
slave = xmlutil.TemplateElement('test')
tmpl.attach(slave)
# Construct a copy
copy = tmpl.copy()
# Check to see if we actually managed a copy
self.assertNotEqual(tmpl, copy)
self.assertEqual(tmpl.root, copy.root)
self.assertEqual(tmpl.version, copy.version)
self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
self.assertEqual(len(tmpl.slaves), len(copy.slaves))
self.assertEqual(tmpl.slaves[0], copy.slaves[0])
def test_slave_apply(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
master = xmlutil.MasterTemplate(elem, 3)
# Construct a slave template with applicable minimum version
slave = xmlutil.SlaveTemplate(elem, 2)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with equal minimum version
slave = xmlutil.SlaveTemplate(elem, 3)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with inapplicable minimum version
slave = xmlutil.SlaveTemplate(elem, 4)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with applicable version range
slave = xmlutil.SlaveTemplate(elem, 2, 4)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with low version range
slave = xmlutil.SlaveTemplate(elem, 1, 2)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with high version range
slave = xmlutil.SlaveTemplate(elem, 4, 5)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with matching version range
slave = xmlutil.SlaveTemplate(elem, 3, 3)
self.assertEqual(slave.apply(master), True)
def test__serialize(self):
# Our test object to serialize
obj = {
'test': {
'name': 'foobar',
'values': [1, 2, 3, 4],
'attrs': {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
},
'image': {
'name': 'image_foobar',
'id': 42,
},
},
}
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',
name='name')
value = xmlutil.SubTemplateElement(root, 'value', selector='values')
value.text = xmlutil.Selector()
attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
key=0, value=1)
master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
# Set up our slave template
root_slave = xmlutil.TemplateElement('test', selector='test')
image = xmlutil.SubTemplateElement(root_slave, 'image',
selector='image', id='id')
image.text = xmlutil.Selector('name')
slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
# Attach the slave to the master...
master.attach(slave)
# Try serializing our object
siblings = master._siblings()
nsmap = master._nsmap()
result = master._serialize(None, obj, siblings, nsmap)
# Now we get to manually walk the element tree...
self.assertEqual(result.tag, 'test')
self.assertEqual(len(result.nsmap), 2)
self.assertEqual(result.nsmap['f'], 'foo')
self.assertEqual(result.nsmap['b'], 'bar')
self.assertEqual(result.get('name'), obj['test']['name'])
for idx, val in enumerate(obj['test']['values']):
self.assertEqual(result[idx].tag, 'value')
self.assertEqual(result[idx].text, str(val))
idx += 1
self.assertEqual(result[idx].tag, 'attrs')
for attr in result[idx]:
self.assertEqual(attr.tag, 'attr')
self.assertEqual(attr.get('value'),
str(obj['test']['attrs'][attr.get('key')]))
idx += 1
self.assertEqual(result[idx].tag, 'image')
self.assertEqual(result[idx].get('id'),
str(obj['test']['image']['id']))
self.assertEqual(result[idx].text, obj['test']['image']['name'])
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.TestCase):
def test_master_template_builder(self):
# Make sure the template hasn't been built yet
self.assertEqual(MasterTemplateBuilder._tmpl, None)
# Now, construct the template
tmpl1 = MasterTemplateBuilder()
# Make sure that there is a template cached...
self.assertNotEqual(MasterTemplateBuilder._tmpl, None)
# Make sure it wasn't what was returned...
self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
cached = MasterTemplateBuilder._tmpl
tmpl2 = MasterTemplateBuilder()
self.assertEqual(MasterTemplateBuilder._tmpl, cached)
# Make sure we're always getting fresh copies
self.assertNotEqual(tmpl1, tmpl2)
# Make sure we can override the copying behavior
tmpl3 = MasterTemplateBuilder(False)
self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
def test_slave_template_builder(self):
# Make sure the template hasn't been built yet
self.assertEqual(SlaveTemplateBuilder._tmpl, None)
# Now, construct the template
tmpl1 = SlaveTemplateBuilder()
# Make sure there is a template cached...
self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
# Make sure it was what was returned...
self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
tmpl2 = SlaveTemplateBuilder()
self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
# Make sure we're always getting the cached copy
self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
def test_make_flat_dict(self):
expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<wrapper><a>foo</a><b>bar</b></wrapper>')
root = xmlutil.make_flat_dict('wrapper')
tmpl = xmlutil.MasterTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
|
sridevikoushik31/openstack
|
nova/tests/api/openstack/test_xmlutil.py
|
Python
|
apache-2.0
| 25,723
|
[
"VisIt"
] |
df08200ec0d25ae481a588c5f8c0d704bec7294e68f86c8419e5f1d339e11320
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample for the Provisioning API and the Email Settings API with OAuth 2.0"""
__author__ = 'Shraddha Gupta <[email protected]>'
from optparse import OptionParser
import gdata.apps
import gdata.apps.emailsettings.client
import gdata.client
import gdata.gauth
API_VERSION = '2.0'
BASE_URL = '/a/feeds/group/%s' % API_VERSION
SCOPE = ('https://apps-apis.google.com/a/feeds/group '
'https://apps-apis.google.com/a/feeds/emailsettings/2.0/')
HOST = 'apps-apis.google.com'
class OAuth2ClientSample(object):
""" OAuth2ClientSample object demos the use of OAuth2Token for retrieving
Members of a Group and updating Email Settings for them."""
def __init__(self, domain, client_id, client_secret):
"""
Args:
domain: string Domain name (e.g. domain.com)
client_id: string Client_id of domain admin account.
client_secret: string Client_secret of domain admin account.
"""
try:
self.token = gdata.gauth.OAuth2Token(client_id=client_id,
client_secret=client_secret,
scope=SCOPE,
user_agent='oauth2-provisioningv2')
self.uri = self.token.generate_authorize_url()
print 'Please visit this URL to authorize the application:'
print self.uri
# Get the verification code from the standard input.
code = raw_input('What is the verification code? ').strip()
self.token.get_access_token(code)
except gdata.gauth.OAuth2AccessTokenError, e:
print 'Invalid Access token, Check your credentials %s' % e
exit(0)
self.domain = domain
self.baseuri = '%s/%s' % (BASE_URL, domain)
self.client = gdata.client.GDClient(host=HOST)
self.client.domain = self.domain
# Authorize the client.
# This will add the Authorization header to all future requests.
self.token.authorize(self.client)
self.email_client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain=self.domain, auth_token=self.token)
self.token.authorize(self.email_client)
def get_users(self, group):
"""Retrieves members from the given group.
Args:
group: string Id of the group
Returns:
Member feed for the given group
"""
uri = '%s/%s/member' % (self.baseuri, group)
try:
feed = self.client.GetFeed(uri=uri)
return gdata.apps.PropertyFeedFromString(str(feed))
except gdata.client.RequestError, e:
print 'Exception %s' % e
  def create_filter(self, feed):
    """For each member of the group, creates a mail filter that marks as read
    all messages that do not contain the domain name as one of their words.
Args:
feed: PropertyFeed Member feed whose emailsettings need to be updated
"""
for entry in feed.entry:
memberType = None
memberId = None
domain = None
for i, property in enumerate(entry.property):
if property.name == 'memberType':
memberType = property.value
if property.name == 'memberId':
user_name, domain = property.value.split('@', 1)
memberId = property.value
# Check that the member is a User belonging to the primary Domain.
if memberType == 'User' and domain == self.domain:
print 'creating filter for %s' % memberId
self.email_client.CreateFilter(user_name,
does_not_have_the_word=self.domain,
mark_as_read=True)
elif memberType == 'User':
print 'User belongs to other Domain %s' %memberId
else:
print 'Member is a group %s' %memberId
def run(self, group):
feed = self.get_users(group)
self.create_filter(feed)
def main():
"""Demos the Provisioning API and the Email Settings API with OAuth 2.0."""
usage = 'usage: %prog [options]'
parser = OptionParser(usage=usage)
parser.add_option('--DOMAIN',
help='Google Apps Domain, e.g. "domain.com".')
parser.add_option('--CLIENT_ID',
help='Registered CLIENT_ID of Domain.')
parser.add_option('--CLIENT_SECRET',
help='Registered CLIENT_SECRET of Domain.')
parser.add_option('--GROUP',
help='Group identifier')
(options, args) = parser.parse_args()
if None in (options.DOMAIN, options.CLIENT_ID, options.CLIENT_SECRET,
options.GROUP):
parser.print_help()
return
sample = OAuth2ClientSample(options.DOMAIN,
options.CLIENT_ID, options.CLIENT_SECRET)
sample.run(options.GROUP)
if __name__ == '__main__':
main()
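# Example invocation (the values below are illustrative only):
#   python provisioning_oauth2_example.py --DOMAIN example.com \
#       --CLIENT_ID my-client-id --CLIENT_SECRET my-client-secret \
#       --GROUP mygroup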
|
mzdaniel/oh-mainline
|
vendor/packages/gdata/samples/apps/provisioning_oauth2_example.py
|
Python
|
agpl-3.0
| 5,260
|
[
"VisIt"
] |
f516adb36fd271ee230b7c956c085746e144f9e7680a77726b316a3c494da9cf
|
#!/usr/bin/python
# (c) 2013, Cove Schneider
# (c) 2014, Joshua Conner <[email protected]>
# (c) 2014, Pavel Antonov <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
DOCUMENTATION = '''
---
module: docker
version_added: "1.4"
short_description: manage docker containers
deprecated: In 2.2 use M(docker_container) and M(docker_image) instead.
description:
- This is the original Ansible module for managing the Docker container life cycle.
- "NOTE: Additional and newer modules are available. For the latest on orchestrating containers with Ansible
visit our Getting Started with Docker Guide at https://github.com/ansible/ansible/blob/devel/docsite/rst/guide_docker.rst."
options:
count:
description:
- Number of matching containers that should be in the desired state.
default: 1
image:
description:
- Container image used to match and launch containers.
required: true
pull:
description:
      - Control when container images are updated from the C(docker_url) registry.
        If "missing", images will be pulled only when missing from the host;
        if "always", the registry will be checked for a newer version of the
        image each time the task executes.
default: missing
choices: [ "missing", "always" ]
version_added: "1.9"
entrypoint:
description:
- Corresponds to ``--entrypoint`` option of ``docker run`` command and
``ENTRYPOINT`` directive of Dockerfile.
Used to match and launch containers.
default: null
required: false
version_added: "2.1"
command:
description:
- Command used to match and launch containers.
default: null
name:
description:
- Name used to match and uniquely name launched containers. Explicit names
are used to uniquely identify a single container or to link among
containers. Mutually exclusive with a "count" other than "1".
default: null
version_added: "1.5"
ports:
description:
- "List containing private to public port mapping specification.
Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
        where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host interface.
The container ports need to be exposed either in the Dockerfile or via the C(expose) option."
default: null
version_added: "1.5"
expose:
description:
- List of additional container ports to expose for port mappings or links.
If the port is already exposed using EXPOSE in a Dockerfile, you don't
need to expose it again.
default: null
version_added: "1.5"
publish_all_ports:
description:
- Publish all exposed ports to the host interfaces.
default: false
version_added: "1.5"
volumes:
description:
- List of volumes to mount within the container
- 'Use docker CLI-style syntax: C(/host:/container[:mode])'
- You can specify a read mode for the mount with either C(ro) or C(rw).
Starting at version 2.1, SELinux hosts can additionally use C(z) or C(Z)
mount options to use a shared or private label for the volume.
default: null
volumes_from:
description:
- List of names of containers to mount volumes from.
default: null
links:
description:
- List of other containers to link within this container with an optional
- 'alias. Use docker CLI-style syntax: C(redis:myredis).'
default: null
version_added: "1.5"
devices:
description:
- List of host devices to expose to container
default: null
required: false
version_added: "2.1"
log_driver:
description:
- You can specify a different logging driver for the container than for the daemon.
"json-file" Default logging driver for Docker. Writes JSON messages to file.
docker logs command is available only for this logging driver.
"none" disables any logging for the container.
"syslog" Syslog logging driver for Docker. Writes log messages to syslog.
        docker logs command is not available for this logging driver.
        "journald" Journald logging driver for Docker. Writes log messages to "journald".
        "gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash.
"fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input).
"awslogs" (added in 2.1) Awslogs logging driver for Docker. Writes log messages to AWS Cloudwatch Logs.
If not defined explicitly, the Docker daemon's default ("json-file") will apply.
Requires docker >= 1.6.0.
required: false
default: json-file
choices:
- json-file
- none
- syslog
- journald
- gelf
- fluentd
- awslogs
version_added: "2.0"
log_opt:
description:
- Additional options to pass to the logging driver selected above. See Docker `log-driver
<https://docs.docker.com/reference/logging/overview/>` documentation for more information.
Requires docker >=1.7.0.
required: false
default: null
version_added: "2.0"
memory_limit:
description:
- RAM allocated to the container as a number of bytes or as a human-readable
string like "512MB". Leave as "0" to specify no limit.
default: 0
docker_url:
description:
- URL of the host running the docker daemon. This will default to the env
var DOCKER_HOST if unspecified.
default: ${DOCKER_HOST} or unix://var/run/docker.sock
use_tls:
description:
- Whether to use tls to connect to the docker server. "no" means not to
use tls (and ignore any other tls related parameters). "encrypt" means
to use tls to encrypt the connection to the server. "verify" means to
also verify that the server's certificate is valid for the server
(this both verifies the certificate against the CA and that the
certificate was issued for that host). If this is unspecified, tls will
only be used if one of the other tls options requires it.
choices: [ "no", "encrypt", "verify" ]
version_added: "1.9"
tls_client_cert:
description:
- Path to the PEM-encoded certificate used to authenticate the docker client.
If specified, tls_client_key must be valid as well.
default: ${DOCKER_CERT_PATH}/cert.pem
version_added: "1.9"
tls_client_key:
description:
- Path to the PEM-encoded key used to authenticate the docker client. If
specified, tls_client_cert must be valid as well.
default: ${DOCKER_CERT_PATH}/key.pem
version_added: "1.9"
tls_ca_cert:
description:
- Path to a PEM-encoded certificate authority to secure the Docker connection.
This has no effect if use_tls is encrypt.
default: ${DOCKER_CERT_PATH}/ca.pem
version_added: "1.9"
tls_hostname:
description:
- A hostname to check matches what's supplied in the docker server's
certificate. If unspecified, the hostname is taken from the docker_url.
default: Taken from docker_url
version_added: "1.9"
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as
specified by docker-py.
default: docker-py default remote API version
version_added: "1.8"
docker_user:
description:
- Username or UID to use within the container
required: false
default: null
version_added: "2.0"
username:
description:
- Remote API username.
default: null
password:
description:
- Remote API password.
default: null
email:
description:
- Remote API email.
default: null
hostname:
description:
- Container hostname.
default: null
domainname:
description:
- Container domain name.
default: null
env:
description:
- Pass a dict of environment variables to the container.
default: null
env_file:
version_added: "2.1"
description:
- Pass in a path to a file with environment variables (FOO=BAR, one per line).
If a variable is present both explicitly (i.e. via C(env)) and in the
environment file, the explicit value will override.
Requires docker-py >= 1.4.0.
default: null
required: false
dns:
description:
- List of custom DNS servers for the container.
required: false
default: null
detach:
description:
- Enable detached mode to leave the container running in background. If
disabled, fail unless the process exits cleanly.
default: true
signal:
version_added: "2.0"
description:
- With the state "killed", you can alter the signal sent to the
container.
required: false
default: KILL
state:
description:
- Assert the container's desired state. "present" only asserts that the
matching containers exist. "started" asserts that the matching
containers both exist and are running, but takes no action if any
configuration has changed. "reloaded" (added in Ansible 1.9) asserts that all matching
containers are running and restarts any that have any images or
configuration out of date. "restarted" unconditionally restarts (or
starts) the matching containers. "stopped" and "killed" stop and kill
all matching containers. "absent" stops and then removes any matching
containers.
required: false
default: started
choices:
- present
- started
- reloaded
- restarted
- stopped
- killed
- absent
privileged:
description:
- Whether the container should run in privileged mode or not.
default: false
lxc_conf:
description:
- LXC configuration parameters, such as C(lxc.aa_profile:unconfined).
default: null
stdin_open:
description:
- Keep stdin open after a container is launched.
default: false
version_added: "1.6"
tty:
description:
- Allocate a pseudo-tty within the container.
default: false
version_added: "1.6"
net:
description:
- 'Network mode for the launched container: bridge, none, container:<name|id>
or host. Requires docker >= 0.11.'
default: false
version_added: "1.8"
pid:
description:
- Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.5.0
required: false
default: None
aliases: []
version_added: "1.9"
registry:
description:
- Remote registry URL to pull images from.
default: DockerHub
aliases: []
version_added: "1.8"
read_only:
description:
- Mount the container's root filesystem as read only
default: null
aliases: []
version_added: "2.0"
restart_policy:
description:
- Container restart policy.
- The 'unless-stopped' choice is only available starting in Ansible 2.1 and for Docker 1.9 and above.
choices: ["no", "on-failure", "always", "unless-stopped"]
default: null
version_added: "1.9"
restart_policy_retry:
description:
- Maximum number of times to restart a container. Leave as "0" for unlimited
retries.
default: 0
version_added: "1.9"
extra_hosts:
version_added: "2.0"
description:
- Dict of custom host-to-IP mappings to be defined in the container
insecure_registry:
description:
- Use insecure private registry by HTTP instead of HTTPS. Needed for
docker-py >= 0.5.0.
default: false
version_added: "1.9"
cpu_set:
description:
- CPUs in which to allow execution. Requires docker-py >= 0.6.0.
required: false
default: null
version_added: "2.0"
cap_add:
description:
- Add capabilities for the container. Requires docker-py >= 0.5.0.
required: false
default: false
version_added: "2.0"
cap_drop:
description:
- Drop capabilities for the container. Requires docker-py >= 0.5.0.
required: false
default: false
aliases: []
version_added: "2.0"
labels:
description:
- Set container labels. Requires docker >= 1.6 and docker-py >= 1.2.0.
required: false
default: null
version_added: "2.1"
stop_timeout:
description:
- How many seconds to wait for the container to stop before killing it.
required: false
default: 10
version_added: "2.0"
timeout:
description:
- Docker daemon response timeout in seconds.
required: false
default: 60
version_added: "2.1"
cpu_shares:
description:
- CPU shares (relative weight). Requires docker-py >= 0.6.0.
required: false
default: 0
version_added: "2.1"
ulimits:
description:
- List of ulimits to set, each given as a name, soft limit and optionally a
hard limit separated by colons, e.g. C(nofile:1024:2048).
Requires docker-py >= 1.2.0 and docker >= 1.6.0.
required: false
default: null
version_added: "2.1"
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
requirements:
- "python >= 2.6"
- "docker-py >= 0.3.0"
- "The docker server >= 0.10.0"
'''
EXAMPLES = '''
# Containers are matched either by name (if provided) or by an exact match of
# the image they were launched with and the command they're running. The module
# can accept either a name to target a container uniquely, or a count to operate
# on multiple containers at once when it makes sense to do so.
# Ensure that a data container with the name "mydata" exists. If no container
# by this name exists, it will be created, but not started.
- name: data container
docker:
name: mydata
image: busybox
state: present
volumes:
- /data
# Ensure that a Redis server is running, using the volume from the data
# container. Expose the default Redis port.
- name: redis container
docker:
name: myredis
image: redis
command: redis-server --appendonly yes
state: started
expose:
- 6379
volumes_from:
- mydata
# Ensure that a container of your application server is running. This will:
# - pull the latest version of your application image from DockerHub.
# - ensure that a container is running with the specified name and exact image.
# If any configuration options have changed, the existing container will be
# stopped and removed, and a new one will be launched in its place.
# - link this container to the existing redis container launched above with
# an alias.
# - grant the container read write permissions for the host's /dev/sda device
# through a node named /dev/xvda
# - bind TCP port 9000 within the container to port 8080 on all interfaces
# on the host.
# - bind UDP port 9001 within the container to port 8081 on the host, only
# listening on localhost.
# - specify 2 ip resolutions.
# - set the environment variable SECRET_KEY to "ssssh".
- name: application container
docker:
name: myapplication
image: someuser/appimage
state: reloaded
pull: always
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
extra_hosts:
host1: "192.168.0.1"
host2: "192.168.0.2"
env:
SECRET_KEY: ssssh
# Ensure that exactly five containers of another server are running with this
# exact image and command. If fewer than five are running, more will be launched;
# if more are running, the excess will be stopped.
- name: load-balanced containers
docker:
state: reloaded
count: 5
image: someuser/anotherappimage
command: sleep 1d
# Unconditionally restart a service container. This may be useful within a
# handler, for example.
- name: application service
docker:
name: myservice
image: someuser/serviceimage
state: restarted
# Stop all containers running the specified image.
- name: obsolete container
docker:
image: someuser/oldandbusted
state: stopped
# Stop and remove a container with the specified name.
- name: obsolete container
docker:
name: ohno
image: someuser/oldandbusted
state: absent
# Example Syslogging Output
- name: myservice container
docker:
name: myservice
image: someservice/someimage
state: reloaded
log_driver: syslog
log_opt:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
syslog-tag: myservice
'''
HAS_DOCKER_PY = True
DEFAULT_DOCKER_API_VERSION = None
DEFAULT_TIMEOUT_SECONDS = 60
import sys
import json
import os
import shlex
try:
from urlparse import urlparse
except ImportError:
# python3
from urllib.parse import urlparse
try:
import docker.client
import docker.utils
import docker.errors
from requests.exceptions import RequestException
except ImportError:
HAS_DOCKER_PY = False
if HAS_DOCKER_PY:
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
try:
# docker-py 1.2+
import docker.constants
DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
except (ImportError, AttributeError):
# docker-py less than 1.2
DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
DEFAULT_TIMEOUT_SECONDS = docker.client.DEFAULT_TIMEOUT_SECONDS
def _human_to_bytes(number):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if isinstance(number, int):
return number
if number.isdigit():
return int(number)
if number[-1] == suffixes[0] and number[-2].isdigit():
# a plain byte count such as "512B": strip the suffix and return an int
return int(number[:-1])
i = 1
for each in suffixes[1:]:
if number[-len(each):] == suffixes[i]:
return int(number[:-len(each)]) * (1024 ** i)
i = i + 1
raise ValueError('Could not convert %s to integer' % (number,))
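# Illustrative conversions (hypothetical inputs, not taken from the module docs):
#   _human_to_bytes("512")   -> 512
#   _human_to_bytes("512MB") -> 512 * 1024 ** 2
#   _human_to_bytes("1GB")   -> 1024 ** 3
# Unrecognised suffixes raise ValueError.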
def _ansible_facts(container_list):
return {"docker_containers": container_list}
def _docker_id_quirk(inspect):
# XXX: some quirk in docker
if 'ID' in inspect:
inspect['Id'] = inspect['ID']
del inspect['ID']
return inspect
def get_split_image_tag(image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag or a digest
for s in ['@',':']:
if s in resource:
resource, tag = resource.split(s, 1)
if registry:
resource = '/'.join((registry, resource))
break
else:
tag = "latest"
resource = image
return resource, tag
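# Illustrative results (hypothetical image names):
#   get_split_image_tag('redis')                       -> ('redis', 'latest')
#   get_split_image_tag('redis:3.0')                   -> ('redis', '3.0')
#   get_split_image_tag('registry.example.com/app:v1') -> ('registry.example.com/app', 'v1')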
def normalize_image(image):
"""
Normalize a Docker image name to include the implied :latest tag.
"""
return ":".join(get_split_image_tag(image))
def is_running(container):
'''Return True if an inspected container is in a state we consider "running."'''
return container['State']['Running'] == True and not container['State'].get('Ghost', False)
def get_docker_py_versioninfo():
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module but not until
# after 0.3.0 was pushed to pypi. If it's there, use it.
version = []
for part in docker.__version__.split('.'):
try:
version.append(int(part))
except ValueError:
for idx, char in enumerate(part):
if not char.isdigit():
nondigit = part[idx:]
digit = part[:idx]
break
if digit:
version.append(int(digit))
if nondigit:
version.append(nondigit)
elif hasattr(docker.Client, '_get_raw_response_socket'):
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
version = (0, 3, 0)
else:
# This is untrue but this module does not function with a version less
# than 0.3.0 so it's okay to lie here.
version = (0,)
return tuple(version)
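# For example, a docker-py reporting __version__ '1.2.3' yields (1, 2, 3), and a
# pre-release such as '1.3.0rc1' yields (1, 3, 0, 'rc1'); returning tuples keeps
# the comparisons against the _cap_ver_req minimums below straightforward.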
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
versioninfo = get_docker_py_versioninfo()
if versioninfo < (0, 3, 0):
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
class DockerManager(object):
counters = dict(
created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0
)
reload_reasons = []
_capabilities = set()
# Map optional parameters to minimum (docker-py version, server APIVersion)
# docker-py version is a tuple of ints because we have to compare them
# server APIVersion is passed to a docker-py function that takes strings
_cap_ver_req = {
'devices': ((0, 7, 0), '1.2'),
'dns': ((0, 3, 0), '1.10'),
'volumes_from': ((0, 3, 0), '1.10'),
'restart_policy': ((0, 5, 0), '1.14'),
'extra_hosts': ((0, 7, 0), '1.3.1'),
'pid': ((1, 0, 0), '1.17'),
'log_driver': ((1, 2, 0), '1.18'),
'log_opt': ((1, 2, 0), '1.18'),
'host_config': ((0, 7, 0), '1.15'),
'cpu_set': ((0, 6, 0), '1.14'),
'cap_add': ((0, 5, 0), '1.14'),
'cap_drop': ((0, 5, 0), '1.14'),
'read_only': ((1, 0, 0), '1.17'),
'labels': ((1, 2, 0), '1.18'),
'stop_timeout': ((0, 5, 0), '1.0'),
'ulimits': ((1, 2, 0), '1.18'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0'),
'env_file': ((1, 4, 0), '0.0')
}
def __init__(self, module):
self.module = module
self.binds = None
self.volumes = None
if self.module.params.get('volumes'):
self.binds = []
self.volumes = []
vols = self.module.params.get('volumes')
for vol in vols:
parts = vol.split(":")
# regular volume
if len(parts) == 1:
self.volumes.append(parts[0])
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
elif 2 <= len(parts) <= 3:
# default to read-write
mode = 'rw'
# with supplied bind mode
if len(parts) == 3:
if parts[2] not in ["rw", "rw,Z", "rw,z", "z,rw", "Z,rw", "Z", "z", "ro", "ro,Z", "ro,z", "z,ro", "Z,ro"]:
self.module.fail_json(msg='invalid bind mode ' + parts[2])
else:
mode = parts[2]
self.binds.append("%s:%s:%s" % (parts[0], parts[1], mode))
else:
self.module.fail_json(msg='volumes support 1 to 3 arguments')
self.lxc_conf = None
if self.module.params.get('lxc_conf'):
self.lxc_conf = []
options = self.module.params.get('lxc_conf')
for option in options:
parts = option.split(':', 1)
self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
self.exposed_ports = None
if self.module.params.get('expose'):
self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
self.port_bindings = None
if self.module.params.get('ports'):
self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
self.links = None
if self.module.params.get('links'):
self.links = self.get_links(self.module.params.get('links'))
self.ulimits = None
if self.module.params.get('ulimits'):
self.ulimits = []
ulimits = self.module.params.get('ulimits')
for ulimit in ulimits:
parts = ulimit.split(":")
if len(parts) == 2:
self.ulimits.append({'name': parts[0], 'soft': int(parts[1]), 'hard': int(parts[1])})
elif len(parts) == 3:
self.ulimits.append({'name': parts[0], 'soft': int(parts[1]), 'hard': int(parts[2])})
else:
self.module.fail_json(msg='ulimits support 2 to 3 arguments')
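# Illustrative mapping (hypothetical values): 'nofile:1024' becomes
# {'name': 'nofile', 'soft': 1024, 'hard': 1024}; 'nofile:1024:2048' becomes
# {'name': 'nofile', 'soft': 1024, 'hard': 2048}.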
# Connect to the docker server using any configured host and TLS settings.
env_host = os.getenv('DOCKER_HOST')
env_docker_verify = os.getenv('DOCKER_TLS_VERIFY')
env_cert_path = os.getenv('DOCKER_CERT_PATH')
env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME')
docker_url = module.params.get('docker_url')
if not docker_url:
if env_host:
docker_url = env_host
else:
docker_url = 'unix://var/run/docker.sock'
docker_api_version = module.params.get('docker_api_version')
timeout = module.params.get('timeout')
tls_client_cert = module.params.get('tls_client_cert', None)
if not tls_client_cert and env_cert_path:
tls_client_cert = os.path.join(env_cert_path, 'cert.pem')
tls_client_key = module.params.get('tls_client_key', None)
if not tls_client_key and env_cert_path:
tls_client_key = os.path.join(env_cert_path, 'key.pem')
tls_ca_cert = module.params.get('tls_ca_cert')
if not tls_ca_cert and env_cert_path:
tls_ca_cert = os.path.join(env_cert_path, 'ca.pem')
tls_hostname = module.params.get('tls_hostname')
if tls_hostname is None:
if env_docker_hostname:
tls_hostname = env_docker_hostname
else:
parsed_url = urlparse(docker_url)
if ':' in parsed_url.netloc:
tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
# no port in the netloc, so the netloc itself is the hostname to verify
tls_hostname = parsed_url.netloc
if not tls_hostname:
tls_hostname = True
# use_tls can be one of four values:
# no: Do not use tls
# encrypt: Use tls. We may do client auth. We will not verify the server
# verify: Use tls. We may do client auth. We will verify the server
# None: Only use tls if the parameters for client auth were specified
# or tls_ca_cert (which requests verifying the server with
# a specific ca certificate)
use_tls = module.params.get('use_tls')
if use_tls is None and env_docker_verify is not None:
use_tls = 'verify'
tls_config = None
if use_tls != 'no':
params = {}
# Setup client auth
if tls_client_cert and tls_client_key:
params['client_cert'] = (tls_client_cert, tls_client_key)
# We're allowed to verify the connection to the server
if use_tls == 'verify' or (use_tls is None and tls_ca_cert):
if tls_ca_cert:
params['ca_cert'] = tls_ca_cert
params['verify'] = True
params['assert_hostname'] = tls_hostname
else:
params['verify'] = True
params['assert_hostname'] = tls_hostname
elif use_tls == 'encrypt':
params['verify'] = False
if params:
# See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296
docker_url = docker_url.replace('tcp://', 'https://')
tls_config = docker.tls.TLSConfig(**params)
self.client = docker.Client(base_url=docker_url,
version=docker_api_version,
tls=tls_config,
timeout=timeout)
self.docker_py_versioninfo = get_docker_py_versioninfo()
env = self.module.params.get('env', None)
env_file = self.module.params.get('env_file', None)
self.environment = self.get_environment(env, env_file)
def _check_capabilities(self):
"""
Create a list of available capabilities
"""
api_version = self.client.version()['ApiVersion']
for cap, req_vers in self._cap_ver_req.items():
if (self.docker_py_versioninfo >= req_vers[0] and
docker.utils.compare_version(req_vers[1], api_version) >= 0):
self._capabilities.add(cap)
def ensure_capability(self, capability, fail=True):
"""
Some of the functionality this ansible module implements is only
available in newer versions of docker. Ensure that the capability
is available here.
If fail is set to False then return True or False depending on whether
we have the capability. Otherwise, simply fail and exit the module if
we lack the capability.
"""
if not self._capabilities:
self._check_capabilities()
if capability in self._capabilities:
return True
if not fail:
return False
api_version = self.client.version()['ApiVersion']
self.module.fail_json(msg='Specifying the `%s` parameter requires'
' docker-py: %s, docker server apiversion %s; found'
' docker-py: %s, server: %s' % (
capability,
'.'.join(map(str, self._cap_ver_req[capability][0])),
self._cap_ver_req[capability][1],
'.'.join(map(str, self.docker_py_versioninfo)),
api_version))
def get_environment(self, env, env_file):
"""
If environment files are combined with explicit environment variables, the explicit environment variables will override the key from the env file.
"""
final_env = {}
if env_file:
self.ensure_capability('env_file')
parsed_env_file = docker.utils.parse_env_file(env_file)
for name, value in parsed_env_file.iteritems():
final_env[name] = str(value)
if env:
for name, value in env.iteritems():
final_env[name] = str(value)
return final_env
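# Sketch of the override behaviour (hypothetical values): an env_file line
# FOO=from_file combined with env={'FOO': 'explicit'} results in
# {'FOO': 'explicit'}, since explicit variables win.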
def get_links(self, links):
"""
Parse the links passed; if a link is specified without an alias, use the link name itself as the alias.
"""
processed_links = {}
for link in links:
parsed_link = link.split(':', 1)
if(len(parsed_link) == 2):
processed_links[parsed_link[0]] = parsed_link[1]
else:
processed_links[parsed_link[0]] = parsed_link[0]
return processed_links
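# For example (hypothetical container names), ['db:primary', 'cache'] yields
# {'db': 'primary', 'cache': 'cache'}; a missing alias defaults to the link name.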
def get_exposed_ports(self, expose_list):
"""
Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
"""
if expose_list:
exposed = []
for port in expose_list:
port = str(port).strip()
if port.endswith('/tcp') or port.endswith('/udp'):
port_with_proto = tuple(port.split('/'))
else:
# assume tcp protocol if not specified
port_with_proto = (port, 'tcp')
exposed.append(port_with_proto)
return exposed
else:
return None
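# Illustrative parsing (hypothetical ports): [8000, '53/udp'] is returned as
# [('8000', 'tcp'), ('53', 'udp')]; the protocol defaults to tcp when omitted.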
def get_start_params(self):
"""
Create start params
"""
params = {
'lxc_conf': self.lxc_conf,
'binds': self.binds,
'port_bindings': self.port_bindings,
'publish_all_ports': self.module.params.get('publish_all_ports'),
'privileged': self.module.params.get('privileged'),
'links': self.links,
'network_mode': self.module.params.get('net'),
}
optionals = {}
for optional_param in ('devices', 'dns', 'volumes_from',
'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts',
'log_driver', 'cap_add', 'cap_drop', 'read_only', 'log_opt'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['devices'] is not None:
self.ensure_capability('devices')
params['devices'] = optionals['devices']
if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']
if optionals['volumes_from'] is not None:
self.ensure_capability('volumes_from')
params['volumes_from'] = optionals['volumes_from']
if optionals['restart_policy'] is not None:
self.ensure_capability('restart_policy')
params['restart_policy'] = { 'Name': optionals['restart_policy'] }
if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
# docker_py only accepts 'host' or None
if 'pid' in optionals and not optionals['pid']:
optionals['pid'] = None
if optionals['pid'] is not None:
self.ensure_capability('pid')
params['pid_mode'] = optionals['pid']
if optionals['extra_hosts'] is not None:
self.ensure_capability('extra_hosts')
params['extra_hosts'] = optionals['extra_hosts']
if optionals['log_driver'] is not None:
self.ensure_capability('log_driver')
log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
if optionals['log_opt'] is not None:
for k, v in optionals['log_opt'].iteritems():
log_config.set_config_value(k, v)
log_config.type = optionals['log_driver']
params['log_config'] = log_config
if optionals['cap_add'] is not None:
self.ensure_capability('cap_add')
params['cap_add'] = optionals['cap_add']
if optionals['cap_drop'] is not None:
self.ensure_capability('cap_drop')
params['cap_drop'] = optionals['cap_drop']
if optionals['read_only'] is not None:
self.ensure_capability('read_only')
params['read_only'] = optionals['read_only']
return params
def create_host_config(self):
"""
Create HostConfig object
"""
params = self.get_start_params()
return docker.utils.create_host_config(**params)
def get_port_bindings(self, ports):
"""
Parse the `ports` string into a port bindings dict for the `start_container` call.
"""
binds = {}
for port in ports:
# ports could potentially be an array like [80, 443], so we make sure they're strings
# before splitting
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
# Bind `container_port` of the container to a dynamically
# allocated TCP port on all available interfaces of the host
# machine.
bind = ('0.0.0.0',)
elif p_len == 2:
# Bind `container_port` of the container to port `parts[0]` on
# all available interfaces of the host machine.
bind = ('0.0.0.0', int(parts[0]))
elif p_len == 3:
# Bind `container_port` of the container to port `parts[1]` on
# IP `parts[0]` of the host machine. If `parts[1]` empty bind
# to a dynamically allocated port of IP `parts[0]`.
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
# append to list if it already exists
old_bind.append(bind)
else:
# otherwise create list that contains the old and new binds
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
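# Illustrative bindings, mirroring the documented CLI syntax (hypothetical ports):
#   '8000'                    -> {8000: ('0.0.0.0',)}          (dynamic host port)
#   '9000:8000'               -> {8000: ('0.0.0.0', 9000)}
#   '127.0.0.1:8081:9001/udp' -> {'9001/udp': ('127.0.0.1', 8081)}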
def get_summary_message(self):
'''
Generate a message that briefly describes the actions taken by this
task, in English.
'''
parts = []
for k, v in self.counters.iteritems():
if v == 0:
continue
if v == 1:
plural = ""
else:
plural = "s"
parts.append("%s %d container%s" % (k, v, plural))
if parts:
return ", ".join(parts) + "."
else:
return "No action taken."
def get_reload_reason_message(self):
'''
Generate a message describing why any reloaded containers were reloaded.
'''
if self.reload_reasons:
return ", ".join(self.reload_reasons)
else:
return None
def get_summary_counters_msg(self):
msg = ""
for k, v in self.counters.iteritems():
msg = msg + "%s %d " % (k, v)
return msg
def increment_counter(self, name):
self.counters[name] = self.counters[name] + 1
def has_changed(self):
for k, v in self.counters.iteritems():
if v > 0:
return True
return False
def get_inspect_image(self):
try:
return self.client.inspect_image(self.module.params.get('image'))
except DockerAPIError as e:
if e.response.status_code == 404:
return None
else:
raise e
def get_image_repo_tags(self):
image, tag = get_split_image_tag(self.module.params.get('image'))
if tag is None:
tag = 'latest'
resource = '%s:%s' % (image, tag)
for image in self.client.images(name=image):
if resource in image.get('RepoTags', []):
return image['RepoTags']
return []
def get_inspect_containers(self, containers):
inspect = []
for i in containers:
details = self.client.inspect_container(i['Id'])
details = _docker_id_quirk(details)
inspect.append(details)
return inspect
def get_differing_containers(self):
"""
Inspect all matching, running containers, and return those that were
started with parameters that differ from the ones that are provided
during this module run. A list containing the differing
containers will be returned, and a short string describing the specific
difference encountered in each container will be appended to
reload_reasons.
This generates the set of containers that need to be stopped and
started with new parameters with state=reloaded.
"""
running = self.get_running_containers()
current = self.get_inspect_containers(running)
defaults = self.client.info()
#Get API version
api_version = self.client.version()['ApiVersion']
image = self.get_inspect_image()
if image is None:
# The image isn't present. Assume that we're about to pull a new
# tag and *everything* will be restarted.
#
# This will give false positives if you untag an image on the host
# and there's nothing more to pull.
return current
differing = []
for container in current:
# IMAGE
# Compare the image by ID rather than name, so that containers
# will be restarted when new versions of an existing image are
# pulled.
if container['Image'] != image['Id']:
self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id']))
differing.append(container)
continue
# ENTRYPOINT
expected_entrypoint = self.module.params.get('entrypoint')
if expected_entrypoint:
expected_entrypoint = shlex.split(expected_entrypoint)
actual_entrypoint = container["Config"]["Entrypoint"]
if actual_entrypoint != expected_entrypoint:
self.reload_reasons.append(
'entrypoint ({0} => {1})'
.format(actual_entrypoint, expected_entrypoint)
)
differing.append(container)
continue
# COMMAND
expected_command = self.module.params.get('command')
if expected_command:
expected_command = shlex.split(expected_command)
actual_command = container["Config"]["Cmd"]
if actual_command != expected_command:
self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command))
differing.append(container)
continue
# EXPOSED PORTS
expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys())
for p in (self.exposed_ports or []):
expected_exposed_ports.add("/".join(p))
actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys())
if actually_exposed_ports != expected_exposed_ports:
self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports))
differing.append(container)
continue
# VOLUMES
expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys())
if self.volumes:
expected_volume_keys.update(self.volumes)
actual_volume_keys = set((container['Config']['Volumes'] or {}).keys())
if actual_volume_keys != expected_volume_keys:
self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys))
differing.append(container)
continue
# ULIMITS
expected_ulimit_keys = set(map(lambda x: '%s:%s:%s' % (x['name'],x['soft'],x['hard']), self.ulimits or []))
actual_ulimit_keys = set(map(lambda x: '%s:%s:%s' % (x['Name'],x['Soft'],x['Hard']), (container['HostConfig']['Ulimits'] or [])))
if actual_ulimit_keys != expected_ulimit_keys:
self.reload_reasons.append('ulimits ({0} => {1})'.format(actual_ulimit_keys, expected_ulimit_keys))
differing.append(container)
continue
# CPU_SHARES
expected_cpu_shares = self.module.params.get('cpu_shares')
actual_cpu_shares = container['HostConfig']['CpuShares']
if expected_cpu_shares and actual_cpu_shares != expected_cpu_shares:
self.reload_reasons.append('cpu_shares ({0} => {1})'.format(actual_cpu_shares, expected_cpu_shares))
differing.append(container)
continue
# MEM_LIMIT
try:
expected_mem = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
#For v1.19 API and above use HostConfig, otherwise use Config
if docker.utils.compare_version('1.19', api_version) >= 0:
actual_mem = container['HostConfig']['Memory']
else:
actual_mem = container['Config']['Memory']
if expected_mem and actual_mem != expected_mem:
self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem))
differing.append(container)
continue
# ENVIRONMENT
# actual_env is likely to include environment variables injected by
# the Dockerfile.
expected_env = {}
for image_env in image['ContainerConfig']['Env'] or []:
name, value = image_env.split('=', 1)
expected_env[name] = value
if self.environment:
for name, value in self.environment.iteritems():
expected_env[name] = str(value)
actual_env = {}
for container_env in container['Config']['Env'] or []:
name, value = container_env.split('=', 1)
actual_env[name] = value
if actual_env != expected_env:
# Record the environment mismatch as a reload reason.
self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env))
differing.append(container)
continue
# LABELS
expected_labels = {}
for name, value in self.module.params.get('labels').iteritems():
expected_labels[name] = str(value)
if type(container['Config']['Labels']) is dict:
actual_labels = container['Config']['Labels']
else:
# initialise before collecting labels from "name=value" strings; without
# this, actual_labels would be referenced before assignment
actual_labels = {}
for container_label in container['Config']['Labels'] or []:
name, value = container_label.split('=', 1)
actual_labels[name] = value
if actual_labels != expected_labels:
self.reload_reasons.append('labels {0} => {1}'.format(actual_labels, expected_labels))
differing.append(container)
continue
# HOSTNAME
expected_hostname = self.module.params.get('hostname')
actual_hostname = container['Config']['Hostname']
if expected_hostname and actual_hostname != expected_hostname:
self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname))
differing.append(container)
continue
# DOMAINNAME
expected_domainname = self.module.params.get('domainname')
actual_domainname = container['Config']['Domainname']
if expected_domainname and actual_domainname != expected_domainname:
self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname))
differing.append(container)
continue
# DETACH
# We don't have to check for undetached containers. If it wasn't
# detached, it would have stopped before the playbook continued!
# NAME
# We also don't have to check name, because this is one of the
# criteria that's used to determine which container(s) match in
# the first place.
# STDIN_OPEN
expected_stdin_open = self.module.params.get('stdin_open')
actual_stdin_open = container['Config']['OpenStdin']
if actual_stdin_open != expected_stdin_open:
self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open))
differing.append(container)
continue
# TTY
expected_tty = self.module.params.get('tty')
actual_tty = container['Config']['Tty']
if actual_tty != expected_tty:
self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty))
differing.append(container)
continue
# -- "start" call differences --
# LXC_CONF
if self.lxc_conf:
expected_lxc = set(self.lxc_conf)
actual_lxc = set(container['HostConfig']['LxcConf'] or [])
if actual_lxc != expected_lxc:
self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc))
differing.append(container)
continue
# BINDS
expected_binds = set()
if self.binds:
for bind in self.binds:
expected_binds.add(bind)
actual_binds = set()
for bind in (container['HostConfig']['Binds'] or []):
if len(bind.split(':')) == 2:
actual_binds.add(bind + ":rw")
else:
actual_binds.add(bind)
if actual_binds != expected_binds:
self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds))
differing.append(container)
continue
# PORT BINDINGS
expected_bound_ports = {}
if self.port_bindings:
for container_port, config in self.port_bindings.iteritems():
if isinstance(container_port, int):
container_port = "{0}/tcp".format(container_port)
if len(config) == 1:
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for hostip, hostport in config:
expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
actual_bound_ports = container['HostConfig']['PortBindings'] or {}
if actual_bound_ports != expected_bound_ports:
self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports))
differing.append(container)
continue
# PUBLISHING ALL PORTS
# What we really care about is the set of ports that is actually
# published. That should be caught above.
# PRIVILEGED
expected_privileged = self.module.params.get('privileged')
actual_privileged = container['HostConfig']['Privileged']
if actual_privileged != expected_privileged:
self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged))
differing.append(container)
continue
# LINKS
expected_links = set()
for link, alias in (self.links or {}).iteritems():
expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias))
actual_links = set(container['HostConfig']['Links'] or [])
if actual_links != expected_links:
self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links))
differing.append(container)
continue
# NETWORK MODE
expected_netmode = self.module.params.get('net') or 'bridge'
actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge'
if actual_netmode != expected_netmode:
self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode))
differing.append(container)
continue
# DEVICES
expected_devices = set()
for device in (self.module.params.get('devices') or []):
if len(device.split(':')) == 2:
expected_devices.add(device + ":rwm")
else:
expected_devices.add(device)
actual_devices = set()
for device in (container['HostConfig']['Devices'] or []):
actual_devices.add("{PathOnHost}:{PathInContainer}:{CgroupPermissions}".format(**device))
if actual_devices != expected_devices:
self.reload_reasons.append('devices ({0} => {1})'.format(actual_devices, expected_devices))
differing.append(container)
continue
# DNS
expected_dns = set(self.module.params.get('dns') or [])
actual_dns = set(container['HostConfig']['Dns'] or [])
if actual_dns != expected_dns:
self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns))
differing.append(container)
continue
# VOLUMES_FROM
expected_volumes_from = set(self.module.params.get('volumes_from') or [])
actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or [])
if actual_volumes_from != expected_volumes_from:
self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from))
differing.append(container)
# LOG_DRIVER
if self.ensure_capability('log_driver', False):
expected_log_driver = self.module.params.get('log_driver') or defaults['LoggingDriver']
actual_log_driver = container['HostConfig']['LogConfig']['Type']
if actual_log_driver != expected_log_driver:
self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))
differing.append(container)
continue
if self.ensure_capability('log_opt', False):
expected_logging_opts = self.module.params.get('log_opt') or {}
actual_log_opts = container['HostConfig']['LogConfig']['Config']
if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0:
log_opt_reasons = {
'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())),
'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items()))
}
self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons))
differing.append(container)
return differing
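# A reload_reasons entry produced above (hypothetical values) looks like
# "image (3f2a... => 9bc1...)" or "dns (set([]) => set(['8.8.8.8']))",
# following the '{0} => {1}' formatting used throughout this method.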
def get_deployed_containers(self):
"""
Return any matching containers that are already present.
"""
entrypoint = self.module.params.get('entrypoint')
if entrypoint is not None:
entrypoint = shlex.split(entrypoint)
command = self.module.params.get('command')
if command is not None:
command = shlex.split(command)
name = self.module.params.get('name')
if name and not name.startswith('/'):
name = '/' + name
deployed = []
# "images" will be a collection of equivalent "name:tag" image names
# that map to the same Docker image.
inspected = self.get_inspect_image()
if inspected:
repo_tags = self.get_image_repo_tags()
else:
repo_tags = [normalize_image(self.module.params.get('image'))]
for container in self.client.containers(all=True):
details = None
if name:
name_list = container.get('Names')
if name_list is None:
name_list = []
matches = name in name_list
else:
details = self.client.inspect_container(container['Id'])
details = _docker_id_quirk(details)
running_image = normalize_image(details['Config']['Image'])
image_matches = running_image in repo_tags
if command is None:
command_matches = True
else:
command_matches = (command == details['Config']['Cmd'])
if entrypoint is None:
entrypoint_matches = True
else:
entrypoint_matches = (
entrypoint == details['Config']['Entrypoint']
)
matches = (image_matches and command_matches and
entrypoint_matches)
if matches:
if not details:
details = self.client.inspect_container(container['Id'])
details = _docker_id_quirk(details)
deployed.append(details)
return deployed
def get_running_containers(self):
return [c for c in self.get_deployed_containers() if is_running(c)]
def pull_image(self):
extra_params = {}
if self.module.params.get('insecure_registry'):
if self.ensure_capability('insecure_registry', fail=False):
extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
resource = self.module.params.get('image')
image, tag = get_split_image_tag(resource)
if self.module.params.get('username'):
try:
self.client.login(
self.module.params.get('username'),
password=self.module.params.get('password'),
email=self.module.params.get('email'),
registry=self.module.params.get('registry')
)
except Exception as e:
self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e))
try:
changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params))
pull_success = False
for change in changes:
status = json.loads(change).get('status', '')
if status.startswith('Status: Image is up to date for'):
# Image is already up to date. Don't increment the counter.
pull_success = True
break
elif (status.startswith('Status: Downloaded newer image for') or
status.startswith('Download complete')):
# Image was updated. Increment the pull counter.
self.increment_counter('pulled')
pull_success = True
break
if not pull_success:
# Unrecognized status string.
self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes)
except Exception as e:
self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
def create_containers(self, count=1):
try:
mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
api_version = self.client.version()['ApiVersion']
params = {'image': self.module.params.get('image'),
'entrypoint': self.module.params.get('entrypoint'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'environment': self.environment,
'labels': self.module.params.get('labels'),
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
'cpuset': self.module.params.get('cpu_set'),
'cpu_shares': self.module.params.get('cpu_shares'),
'user': self.module.params.get('docker_user'),
}
if self.ensure_capability('host_config', fail=False):
params['host_config'] = self.create_host_config()
#For v1.19 API and above use HostConfig, otherwise use Config
if docker.utils.compare_version('1.19', api_version) < 0:
params['mem_limit'] = mem_limit
else:
params['host_config']['Memory'] = mem_limit
if self.ulimits is not None:
self.ensure_capability('ulimits')
params['host_config']['ulimits'] = self.ulimits
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except docker.errors.APIError as e:
if e.response.status_code != 404:
raise
self.pull_image()
containers = do_create(count, params)
return containers
def start_containers(self, containers):
params = {}
if not self.ensure_capability('host_config', fail=False):
params = self.get_start_params()
for i in containers:
self.client.start(i)
self.increment_counter('started')
if not self.module.params.get('detach'):
status = self.client.wait(i['Id'])
if status != 0:
output = self.client.logs(i['Id'], stdout=True, stderr=True,
stream=False, timestamps=False)
self.module.fail_json(status=status, msg=output)
def stop_containers(self, containers):
for i in containers:
self.client.stop(i['Id'], self.module.params.get('stop_timeout'))
self.increment_counter('stopped')
return [self.client.wait(i['Id']) for i in containers]
def remove_containers(self, containers):
for i in containers:
self.client.remove_container(i['Id'])
self.increment_counter('removed')
def kill_containers(self, containers):
for i in containers:
self.client.kill(i['Id'], self.module.params.get('signal'))
self.increment_counter('killed')
def restart_containers(self, containers):
for i in containers:
self.client.restart(i['Id'])
self.increment_counter('restarted')
class ContainerSet:
def __init__(self, manager):
self.manager = manager
self.running = []
self.deployed = []
self.changed = []
def refresh(self):
'''
Update our view of the matching containers from the Docker daemon.
'''
self.deployed = self.manager.get_deployed_containers()
self.running = [c for c in self.deployed if is_running(c)]
def notice_changed(self, containers):
'''
Record a collection of containers as "changed".
'''
self.changed.extend(containers)
def present(manager, containers, count, name):
'''Ensure that exactly `count` matching containers exist in any state.'''
containers.refresh()
delta = count - len(containers.deployed)
if delta > 0:
created = manager.create_containers(delta)
containers.notice_changed(manager.get_inspect_containers(created))
if delta < 0:
# If both running and stopped containers exist, remove
# stopped containers first.
containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy)))
to_stop = []
to_remove = []
for c in containers.deployed[0:-delta]:
if is_running(c):
to_stop.append(c)
to_remove.append(c)
manager.stop_containers(to_stop)
containers.notice_changed(manager.get_inspect_containers(to_remove))
manager.remove_containers(to_remove)
def started(manager, containers, count, name):
'''Ensure that exactly `count` matching containers exist and are running.'''
containers.refresh()
delta = count - len(containers.running)
if delta > 0:
if name and containers.deployed:
# A stopped container exists with the requested name.
# Clean it up before attempting to start a new one.
manager.remove_containers(containers.deployed)
created = manager.create_containers(delta)
manager.start_containers(created)
containers.notice_changed(manager.get_inspect_containers(created))
if delta < 0:
excess = containers.running[0:-delta]
containers.notice_changed(manager.get_inspect_containers(excess))
manager.stop_containers(excess)
manager.remove_containers(excess)
def reloaded(manager, containers, count, name):
'''
Ensure that exactly `count` matching containers exist and are
running. If any associated settings have been changed (volumes,
ports or so on), restart those containers.
'''
containers.refresh()
for container in manager.get_differing_containers():
manager.stop_containers([container])
manager.remove_containers([container])
started(manager, containers, count, name)
def restarted(manager, containers, count, name):
'''
Ensure that exactly `count` matching containers exist and are
running. Unconditionally restart any that were already running.
'''
containers.refresh()
for container in manager.get_differing_containers():
manager.stop_containers([container])
manager.remove_containers([container])
containers.refresh()
manager.restart_containers(containers.running)
started(manager, containers, count, name)
def stopped(manager, containers, count, name):
'''Stop any matching containers that are running.'''
containers.refresh()
manager.stop_containers(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.running))
def killed(manager, containers, count, name):
'''Kill any matching containers that are running.'''
containers.refresh()
manager.kill_containers(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.running))
def absent(manager, containers, count, name):
'''Stop and remove any matching containers.'''
containers.refresh()
manager.stop_containers(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.deployed))
manager.remove_containers(containers.deployed)
def main():
module = AnsibleModule(
argument_spec = dict(
count = dict(default=1, type='int'),
image = dict(required=True),
pull = dict(required=False, default='missing', choices=['missing', 'always']),
entrypoint = dict(required=False, default=None, type='str'),
command = dict(required=False, default=None),
expose = dict(required=False, default=None, type='list'),
ports = dict(required=False, default=None, type='list'),
publish_all_ports = dict(default=False, type='bool'),
volumes = dict(default=None, type='list'),
volumes_from = dict(default=None, type='list'),
links = dict(default=None, type='list'),
devices = dict(default=None, type='list'),
memory_limit = dict(default=0),
memory_swap = dict(default=0, type='int'),
cpu_shares = dict(default=0, type='int'),
docker_url = dict(),
use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']),
tls_client_cert = dict(required=False, default=None, type='path'),
tls_client_key = dict(required=False, default=None, type='path'),
tls_ca_cert = dict(required=False, default=None, type='path'),
tls_hostname = dict(required=False, type='str', default=None),
docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'),
docker_user = dict(default=None),
username = dict(default=None),
password = dict(no_log=True),
email = dict(),
registry = dict(),
hostname = dict(default=None),
domainname = dict(default=None),
env = dict(type='dict'),
env_file = dict(default=None),
dns = dict(default=None, type='list'),
detach = dict(default=True, type='bool'),
state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']),
signal = dict(default=None),
restart_policy = dict(default=None, choices=['always', 'on-failure', 'no', 'unless-stopped']),
restart_policy_retry = dict(default=0, type='int'),
extra_hosts = dict(type='dict'),
debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'),
stdin_open = dict(default=False, type='bool'),
tty = dict(default=False, type='bool'),
lxc_conf = dict(default=None, type='list'),
name = dict(default=None),
net = dict(default=None),
pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'),
log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs']),
log_opt = dict(default=None, type='dict'),
cpu_set = dict(default=None),
cap_add = dict(default=None, type='list'),
cap_drop = dict(default=None, type='list'),
read_only = dict(default=None, type='bool'),
labels = dict(default={}, type='dict'),
stop_timeout = dict(default=10, type='int'),
timeout = dict(required=False, default=DEFAULT_TIMEOUT_SECONDS, type='int'),
ulimits = dict(default=None, type='list'),
),
required_together = (
['tls_client_cert', 'tls_client_key'],
),
)
check_dependencies(module)
try:
manager = DockerManager(module)
count = module.params.get('count')
name = module.params.get('name')
pull = module.params.get('pull')
state = module.params.get('state')
if state == 'running':
# Renamed running to started in 1.9
state = 'started'
if count < 0:
module.fail_json(msg="Count must not be negative")
if count > 1 and name:
module.fail_json(msg="Count and name must not be used together")
# Explicitly pull new container images, if requested. Do this before
# noticing running and deployed containers so that the image names
# will differ if a newer image has been pulled.
# Missing images should be pulled first to avoid downtime when the old
# container is stopped, but the image for the new one is not downloaded yet.
# It also prevents removal of running container before realizing
# that requested image cannot be retrieved.
if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None):
manager.pull_image()
containers = ContainerSet(manager)
if state == 'present':
present(manager, containers, count, name)
elif state == 'started':
started(manager, containers, count, name)
elif state == 'reloaded':
reloaded(manager, containers, count, name)
elif state == 'restarted':
restarted(manager, containers, count, name)
elif state == 'stopped':
stopped(manager, containers, count, name)
elif state == 'killed':
killed(manager, containers, count, name)
elif state == 'absent':
absent(manager, containers, count, name)
else:
module.fail_json(msg='Unrecognized state %s. Must be one of: '
'present; started; reloaded; restarted; '
'stopped; killed; absent.' % state)
module.exit_json(changed=manager.has_changed(),
msg=manager.get_summary_message(),
summary=manager.counters,
reload_reasons=manager.get_reload_reason_message(),
ansible_facts=_ansible_facts(containers.changed))
except DockerAPIError as e:
module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)
except RequestException as e:
module.fail_json(changed=manager.has_changed(), msg=repr(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
romain-dartigues/ansible-modules-core
|
cloud/docker/_docker.py
|
Python
|
gpl-3.0
| 74,953
|
[
"VisIt"
] |
f3b89dc1f96a4082ea26684a9e986bfea1586b14374b42c85de6021df6b70477
|
import os
import unittest
from monty.os.path import which
from monty.serialization import loadfn
from pymatgen.command_line.mcsqs_caller import run_mcsqs
from pymatgen.util.testing import PymatgenTest
__author__ = "Handong Ling, Rachel Woods-Robinson"
__maintainer__ = "Handong Ling, Rachel Woods-Robinson"
__email__ = "[email protected], [email protected]"
test_dir = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "test_files", "mcsqs"
)
@unittest.skipIf(not which("mcsqs"), "mcsqs executable not present")
class McsqsCallerTest(PymatgenTest):
def setUp(self):
self.pztstructs = loadfn(os.path.join(test_dir, "pztstructs.json"))
self.pztstructs2 = loadfn(os.path.join(test_dir, "pztstructs2.json"))
self.struc = self.get_structure("Pb2TiZrO6")
self.perfect_match_zzn_rs = loadfn(
os.path.join(test_dir, "perfect_match_zzn_rs.json")
)
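# The tests below only rely on run_mcsqs returning an object with `bestsqs`
# (a structure to compare against the json reference structures) and
# `objective_function`; other fields of the result are not inspected here.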
def test_mcsqs_caller_supercell(self):
struc = self.struc.copy()
struc.replace_species(
{"Ti": {"Ti": 0.5, "Zr": 0.5}, "Zr": {"Ti": 0.5, "Zr": 0.5}}
)
sqs = run_mcsqs(
struc, {2: 6, 3: 4}, scaling=[2, 1, 1], search_time=0.01, instances=1
)
matches = [sqs.bestsqs.matches(s) for s in self.pztstructs]
self.assertIn(True, matches)
def test_mcsqs_caller_total_atoms(self):
struc = self.struc.copy()
struc.replace_species(
{"Ti": {"Ti": 0.5, "Zr": 0.5}, "Zr": {"Ti": 0.5, "Zr": 0.5}}
)
sqs = run_mcsqs(struc, {2: 6, 3: 4}, scaling=2, search_time=0.01, instances=1)
matches = [sqs.bestsqs.matches(s) for s in self.pztstructs2]
self.assertIn(True, matches)
def test_mcsqs_caller_total_atoms_auto_instances(self):
struc = self.struc.copy()
struc.replace_species(
{"Ti": {"Ti": 0.5, "Zr": 0.5}, "Zr": {"Ti": 0.5, "Zr": 0.5}}
)
sqs = run_mcsqs(
struc, {2: 6, 3: 4}, scaling=2, search_time=0.01, instances=None
)
matches = [sqs.bestsqs.matches(s) for s in self.pztstructs2]
self.assertIn(True, matches)
def test_mcsqs_caller_parallel(self):
# explicitly test with four instances
struc = self.struc.copy()
struc.replace_species(
{"Ti": {"Ti": 0.5, "Zr": 0.5}, "Zr": {"Ti": 0.5, "Zr": 0.5}}
)
sqs = run_mcsqs(struc, {2: 6, 3: 4}, scaling=2, search_time=0.01, instances=4)
matches = [sqs.bestsqs.matches(s) for s in self.pztstructs2]
self.assertIn(True, matches)
def test_mcsqs_perfect_match_error(self):
scale = 32 / self.perfect_match_zzn_rs.num_sites
sqs = run_mcsqs(
self.perfect_match_zzn_rs,
{2: 6, 3: 4},
scaling=scale,
search_time=1,
instances=1,
)
self.assertEqual(sqs.objective_function, "Perfect_match")
def test_mcsqs_perfect_match_error_parallel(self):
scale = 32 / self.perfect_match_zzn_rs.num_sites
sqs = run_mcsqs(
self.perfect_match_zzn_rs,
{2: 6, 3: 4},
scaling=scale,
search_time=1,
instances=4,
)
self.assertEqual(sqs.objective_function, "Perfect_match")
def test_mcsqs_caller_runtime_error(self):
struc = self.struc.copy()
struc.replace_species(
{"Ti": {"Ti": 0.5, "Zr": 0.5}, "Zr": {"Ti": 0.5, "Zr": 0.5}}
)
struc.replace_species({"Pb": {"Ti": 0.2, "Pb": 0.8}})
struc.replace_species({"O": {"F": 0.8, "O": 0.2}})
self.assertRaises(RuntimeError, run_mcsqs, struc, {2: 6, 3: 4}, 10, 0.000001)
|
mbkumar/pymatgen
|
pymatgen/command_line/tests/test_mcsqs_caller.py
|
Python
|
mit
| 3,703
|
[
"pymatgen"
] |
d71c01d419579acab28c47e711b0fc1acdde2a09221f3fd3ebd86bccb74e9950
|
#!/usr/bin/env ipython3
##
# @file
# Calculate approximate stellar masses from V-band magnitude, V-I color, and distance to the dwarf galaxy.
# Read in [some].bin.[MV,V-I], output ID,Mstar
# (c) 2013 Pascal Steger, [email protected]
import numpy
import sys
if len(sys.argv) < 2:
    print("use: stellarmass.py [car,scl,sex,for]")
    sys.exit(1)
# choose simulation
dwarf = sys.argv[1]
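# NOTE: 'gp' below is assumed to be the gravimage parameter object that
# provides the machine-dependent data directory; it is not imported in this snippet.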
dir = gp.files.machine
print(dir+dwarf+"/table_merged.bin")
delim=[0,22,3,3,6,4,3,5,6,6,7,5,6,5,6,5,6]
ID=numpy.genfromtxt(dir+dwarf+"/table_merged.bin",skiprows=29,unpack=True,usecols=(0,1),delimiter=delim,dtype="string")
RAh,RAm,RAs,DEd,DEm,DEs,Vmag,VI,VHel,e_VHel,SigFe,e_SigFe,SigMg,e_SigMg,PM=numpy.genfromtxt(dir+dwarf+"/table_merged.bin",skiprows=29,unpack=True,usecols=tuple(range(2,17)),delimiter=delim,filling_values=-1)
print('Vmag = ',Vmag[0:10])
MCMD,MvCMD,VICMD=numpy.genfromtxt(dir+dwarf+"/../SCMD/SCMD.dat",\
skiprows=1,unpack=True,usecols=(0,4,8),filling_values=-1)
# luminosity distance, measured in [parsecs]
kpc = 1000
DL= {
'for': lambda x: x * (138),#+/- 8
'car': lambda x: x * (101),#+/- 5
'sex': lambda x: x * (86), #+/- 4
'scl': lambda x: x * (79) #+/- 4
}[dwarf](kpc)
# print("DL = ",DL)
import matplotlib
matplotlib.use('pdf')
from pylab import *
ion();subplot(111)
n,bins,rectangles = hist(PM, bins=20, normed=True)
axvline(x=0.95,color='r')
xlabel(r'PM')
ylabel(r'number')
xlim([0,1])
savefig(dir+dwarf+"/PM.pdf")
ioff();clf()
# only use stars which are members of the dwarf
pm = (PM>0.95)*(VI<70)
print(pm)
print("fraction of members = ",1.0*sum(pm)/len(pm))
ID=ID[1][pm]; RAh=RAh[pm]; RAm=RAm[pm]; DEd=DEd[pm]; DEm=DEm[pm]; DEs=DEs[pm]
Vmag = Vmag[pm]; VI=VI[pm]; VHel=VHel[pm]; e_VHel=e_VHel[pm]
SigFe=SigFe[pm]; e_SigFe=e_SigFe[pm]; SigMg=SigMg[pm]; e_SigMg=e_SigMg[pm]
PM=PM[pm]
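# absolute magnitude from the distance modulus: M_V = m_V - 5*(log10(d[pc]) - 1)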
VMag = Vmag-5.0*(numpy.log10(DL)-1.0)
minVMag,maxVMag=numpy.min(VMag),numpy.max(VMag)
minVI,maxVI =numpy.min(VI),numpy.max(VI)
print("min, max of VMag= ",minVMag,maxVMag)
windowCMD = (minVMag<=MvCMD)*(MvCMD<=maxVMag)*(minVI<=VICMD)*(VICMD<=maxVI)
MCMD = MCMD[windowCMD]
MvCMD = MvCMD[windowCMD]
VICMD = VICMD[windowCMD]
ion(); subplot(111)
# set_xaxis('log')
plot(VI,VMag,'b.',linewidth=1)
# errorbar(rmean,rho,xerr=rspan,yerr=err,linewidth=3)
plot(VICMD,MvCMD,'r+',linewidth=3)
xyset = [[str(MCMD[i]),VICMD[i],MvCMD[i]] for i in range(len(VICMD))]
print(xyset[0:3])
for label, x, y in xyset:
annotate(label,xy = (x, y))
# visible region
# plt.xlim([10**0,3*10**1])
# plt.ylim([10**-2,10**2])
xlabel(r'$V-I$')
ylabel(r'$M_V$')
# legend(['\rho','\rho'],'lower left')
# title('z=11.7')
ioff();savefig(dir+dwarf+"/HRD.pdf")
show();clf()
|
PascalSteger/gravimage
|
programs/stellarmass.py
|
Python
|
gpl-2.0
| 2,675
|
[
"Galaxy"
] |
6877843b4137a6265b1bfa0c0a5dde6e3002c259305266fb052b3bf0f3e250ca
|
# -*- coding: utf-8 -*-
# Copyright 2016-2020 Fabian Hofmann (FIAS), Jonas Hoersch (KIT, IAI) and
# Fabian Gotzens (FZJ, IEK-STE)
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Collection of power plant data bases and statistical data
"""
import logging
import os
import re
import xml.etree.ElementTree as ET
from zipfile import ZipFile
import entsoe as entsoe_api
import numpy as np
import pandas as pd
import pycountry
import requests
from deprecation import deprecated
from .cleaning import (
clean_powerplantname,
clean_technology,
gather_fueltype_info,
gather_set_info,
gather_technology_info,
)
from .core import _data_in, _package_data, get_config
from .heuristics import scale_to_net_capacities
from .utils import (
config_filter,
correct_manually,
fill_geoposition,
get_raw_file,
set_column_name,
)
logger = logging.getLogger(__name__)
cget = pycountry.countries.get
net_caps = get_config()["display_net_caps"]
def BEYONDCOAL(raw=False, update=False, config=None):
"""
Importer for the BEYOND COAL database.
Parameters
----------
raw : boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
fn = get_raw_file("BEYONDCOAL", update=update, config=config)
df = pd.read_excel(fn, sheet_name="Plant", header=[0, 1, 2], skiprows=[3])
if raw:
return df
phaseout_col = "Covered by country phase-out? [if yes: country phase-out year]"
df = (
df["Plant Data"]
.droplevel(1, axis=1)
.rename(
columns={
"Plant name": "Name",
"Fuel type": "Fueltype",
"Latitude": "lat",
"Longitude": "lon",
"Commissioning year of first unit": "DateIn",
"(Announced) Retirement year of last unit": "DateOut",
"Coal capacity open": "Capacity",
"Plant status\n(gross)": "status",
"EBC plant ID": "projectID",
}
)
.query('status != "Cancelled"')
.assign(
DateOut=lambda df: df.DateOut.fillna(df[phaseout_col]).replace(
{8888: np.nan}
),
projectID=lambda df: "BEYOND-" + df.projectID,
Fueltype=lambda df: df.Fueltype.str.title(),
Set="PP",
)
.pipe(config_filter, name="BEYONDCOAL", config=config)
.pipe(set_column_name, "BEYONDCOAL")
)
return df
def OPSD(
rawEU=False,
rawDE=False,
rawDE_withBlocks=False,
update=False,
statusDE=["operating", "reserve", "special_case", "shutdown_temporary"],
config=None,
):
"""
Importer for the OPSD (Open Power Systems Data) database.
Parameters
----------
rawEU : Boolean, default False
Whether to return the raw EU (=non-DE) database.
rawDE : Boolean, default False
Whether to return the raw DE database.
update: bool, default False
Whether to update the data from the url.
    statusDE : list, default ['operating', 'reserve', 'special_case', 'shutdown_temporary']
Filter DE entries by operational status ['operating', 'shutdown',
'reserve', etc.]
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
opsd_DE = pd.read_csv(get_raw_file("OPSD_DE", update, config), na_values=" ")
opsd_EU = pd.read_csv(get_raw_file("OPSD_EU", update, config), na_values=" ")
if rawEU and rawDE:
raise (
NotImplementedError(
"""
It is not possible to show both DE and EU raw databases at the
same time as they have different formats. Choose only one!
"""
)
)
if rawEU:
return opsd_EU
if rawDE:
return opsd_DE
if rawDE_withBlocks:
DE_blocks = (
opsd_DE.loc[lambda x: ~(x["block_bnetza"].isna())]
.loc[lambda x: x["block_bnetza"] != x["name_bnetza"]]
.assign(block=lambda x: x.block_bnetza.str.strip())
.loc[lambda x: ~(x.block.isin(["-", "entfällt"]))]
.assign(len_block=lambda x: x.block.apply(len))
)
upd = (
DE_blocks.loc[lambda x: (x.len_block <= 6)]
.loc[lambda x: (x.block.str.slice(0, 5) != "Block")]
.assign(block=lambda x: "Block " + x["block"])
)
DE_blocks.update(upd)
DE_blocks = DE_blocks.assign(
name_bnetza=lambda x: x["name_bnetza"].str.strip() + " " + x["block"]
)
opsd_DE.update(DE_blocks)
return opsd_DE.drop("Unnamed: 0", axis=1).set_index("id")
opsd_EU = (
opsd_EU.rename(columns=str.title)
.rename(
columns={
"Lat": "lat",
"Lon": "lon",
"Energy_Source": "Fueltype",
"Commissioned": "DateIn",
"Eic_Code": "EIC",
}
)
.eval("DateRetrofit = DateIn")
.assign(projectID=lambda s: "OEU" + pd.Series(s.index.astype(str), s.index))
.reindex(columns=config["target_columns"])
)
opsd_DE = (
opsd_DE.rename(columns=str.title)
.rename(
columns={
"Lat": "lat",
"Lon": "lon",
"Fuel": "Fueltype",
"Type": "Set",
"Country_Code": "Country",
"Capacity_Net_Bnetza": "Capacity",
"Commissioned": "DateIn",
"Shutdown": "DateOut",
"Eic_Code_Plant": "EIC",
"Id": "projectID",
}
)
.assign(
Name=lambda d: d.Name_Bnetza.fillna(d.Name_Uba),
Fueltype=lambda d: d.Fueltype.fillna(d.Energy_Source_Level_1),
DateRetrofit=lambda d: d.Retrofit.fillna(d.DateIn),
)
)
if statusDE is not None:
opsd_DE = opsd_DE.loc[opsd_DE.Status.isin(statusDE)]
opsd_DE = opsd_DE.reindex(columns=config["target_columns"])
return (
pd.concat([opsd_EU, opsd_DE], ignore_index=True)
.replace(
dict(
Fueltype={
"Biomass and biogas": "Bioenergy",
"Fossil fuels": np.nan,
"Mixed fossil fuels": "Other",
"Natural gas": "Natural Gas",
"Non-renewable waste": "Waste",
"Other bioenergy and renewable waste": "Bioenergy",
"Other or unspecified energy sources": "Other",
"Other fossil fuels": "Other",
"Other fuels": "Other",
},
Set={"IPP": "PP"},
)
)
.replace(
{"Country": {"UK": "GB", "[ \t]+|[ \t]+$.": ""}, "Capacity": {0.0: np.nan}},
regex=True,
)
.dropna(subset=["Capacity"])
.assign(
Name=lambda df: df.Name.str.title().str.strip(),
Fueltype=lambda df: df.Fueltype.str.title().str.strip(),
)
.powerplant.convert_alpha2_to_country()
.pipe(set_column_name, "OPSD")
# .pipe(correct_manually, 'OPSD', config=config)
.pipe(config_filter, name="OPSD", config=config)
.pipe(gather_set_info)
.pipe(clean_powerplantname)
.pipe(clean_technology)
)
def GEO(raw=False, update=False, config=None):
"""
Importer for the GEO database.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
rename_cols = {
"GEO_Assigned_Identification_Number": "projectID",
"Name": "Name",
"Type": "Fueltype",
"Type_of_Plant_rng1": "Technology",
"Type_of_Fuel_rng1_Primary": "FuelClassification1",
"Type_of_Fuel_rng2_Secondary": "FuelClassification2",
"Country": "Country",
"Design_Capacity_MWe_nbr": "Capacity",
"Year_Project_Commissioned": "DateIn",
"Year_rng1_yr1": "DateRetrofit",
"Longitude_Start": "lon",
"Latitude_Start": "lat",
}
geo = pd.read_csv(
get_raw_file("GEO", update=update, config=config), low_memory=False
)
if raw:
return geo
geo = geo.rename(columns=rename_cols)
units = pd.read_csv(
get_raw_file("GEO_units", update=update, config=config), low_memory=False
)
# map from units to plants
units["DateIn"] = units.Date_Commissioned_dt.str[:4].astype(float)
units["Efficiency"] = (
units.Unit_Efficiency_Percent.str.replace("%", "").astype(float) / 100
)
units = units.groupby("GEO_Assigned_Identification_Number").agg(
{"DateIn": [min, max], "Efficiency": "mean"}
)
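    # '_' below holds, per plant, the earliest/latest unit commissioning year
    # and the mean unit efficiency; these fill gaps in the plant-level data.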
_ = geo.projectID.map(units.DateIn["min"])
geo["DateIn"] = (
geo.DateIn.str[:4]
.apply(pd.to_numeric, errors="coerce")
.where(lambda x: x > 1900)
.fillna(_)
)
_ = geo.projectID.map(units.DateIn["max"])
geo["DateRetrofit"] = geo.DateRetrofit.astype(float).fillna(_)
_ = units.Efficiency["mean"]
geo["Efficiency"] = geo.projectID.map(_)
countries = config["target_countries"]
return (
geo.assign(projectID=lambda s: "GEO" + s.projectID.astype(str))
.query("Country in @countries")
.replace(
{
col: {"Gas": "Natural Gas"}
for col in {"Fueltype", "FuelClassification1", "FuelClassification2"}
}
)
.pipe(gather_fueltype_info, search_col=["FuelClassification1"])
.pipe(gather_technology_info, search_col=["FuelClassification1"], config=config)
.pipe(gather_set_info)
.pipe(set_column_name, "GEO")
.pipe(config_filter, name="GEO", config=config)
.pipe(clean_powerplantname)
.pipe(clean_technology, generalize_hydros=True)
.pipe(scale_to_net_capacities, (not config["GEO"]["net_capacity"]))
.pipe(config_filter, name="GEO", config=config)
.pipe(correct_manually, "GEO", config=config)
)
@deprecated(
deprecated_in="0.4.9",
removed_in="0.5.0",
details="Removed since data is not publicly available anymore",
)
def CARMA(raw=False, update=False, config=None):
"""
Importer for the Carma database.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
carma = pd.read_csv(get_raw_file("CARMA", update, config), low_memory=False)
if raw:
return carma
return (
carma.rename(
columns={
"Geoposition": "Geoposition",
"cap": "Capacity",
"city": "location",
"country": "Country",
"fuel1": "Fueltype",
"lat": "lat",
"lon": "lon",
"plant": "Name",
"plant.id": "projectID",
}
)
.assign(projectID=lambda df: "CARMA" + df.projectID.astype(str))
.loc[lambda df: df.Country.isin(config["target_countries"])]
.replace(
dict(
Fueltype={
"COAL": "Hard Coal",
"WAT": "Hydro",
"FGAS": "Natural Gas",
"NUC": "Nuclear",
"FLIQ": "Oil",
"WIND": "Wind",
"EMIT": "Other",
"GEO": "Geothermal",
"WSTH": "Waste",
"SUN": "Solar",
"BLIQ": "Bioenergy",
"BGAS": "Bioenergy",
"BSOL": "Bioenergy",
"OTH": "Other",
}
)
)
.pipe(clean_powerplantname)
.drop_duplicates()
.pipe(set_column_name, "CARMA")
.pipe(config_filter, name="CARMA", config=config)
.pipe(gather_technology_info, config=config)
.pipe(gather_set_info)
.pipe(clean_technology)
.pipe(scale_to_net_capacities, not config["CARMA"]["net_capacity"])
.pipe(correct_manually, "CARMA", config=config)
)
def JRC(raw=False, update=False, config=None):
"""
Importer for the JRC Hydro-power plants database retrieves from
https://github.com/energy-modelling-toolkit/hydro-power-database.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
fn = get_raw_file("JRC", update, config)
key = "jrc-hydro-power-plant-database.csv"
with ZipFile(fn, "r") as file:
df = pd.read_csv(file.open(key))
if raw:
return df
df = (
df.rename(
columns={
"id": "projectID",
"name": "Name",
"installed_capacity_MW": "Capacity",
"country_code": "Country",
"type": "Technology",
"dam_height_m": "DamHeight_m",
"volume_Mm3": "Volume_Mm3",
"storage_capacity_MWh": "StorageCapacity_MWh",
}
)
.eval("Duration = StorageCapacity_MWh / Capacity")
.replace(
dict(
Technology={
"HDAM": "Reservoir",
"HPHS": "Pumped Storage",
"HROR": "Run-Of-River",
}
)
)
.drop(columns=["pypsa_id", "GEO"])
.assign(Set="Store", Fueltype="Hydro")
.powerplant.convert_alpha2_to_country()
.pipe(config_filter)
)
# TODO: Temporary section to deal with duplicate identifiers in the JRC
# input file. Can be removed again, once the duplicates have been removed
# in a new release.
mask = df.projectID.duplicated(keep=False)
df.loc[mask, "projectID"] += (
df.groupby("projectID").cumcount().replace({0: "a", 1: "b", 2: "c", 3: "d"})
)
return df
@deprecated(
deprecated_in="0.4.9",
removed_in="0.5.0",
details="Use the JRC data instead",
)
def IWPDCY(config=None):
"""
This data is not yet available. Was extracted manually from
the 'International Water Power & Dam Country Yearbook'.
Parameters
----------
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
return (
pd.read_csv(config["IWPDCY"]["fn"], encoding="utf-8", index_col="id")
.assign(File="IWPDCY.csv", projectID=lambda df: "IWPDCY" + df.index.astype(str))
.dropna(subset=["Capacity"])
.pipe(set_column_name, "IWPDCY")
.pipe(config_filter, name="IWPDY", config=config)
.pipe(gather_set_info)
.pipe(correct_manually, "IWPDCY", config=config)
)
def Capacity_stats(
raw=False, level=2, config=None, update=False, source="entsoe SO&AF", year=2016
):
"""
Standardize the aggregated capacity statistics provided by the ENTSO-E.
Parameters
----------
year : int
Year of the data (range usually 2013-2017)
(defaults to 2016)
source : str
Which statistics source from
{'entsoe SO&AF', 'entsoe Statistics', 'EUROSTAT', ...}
(defaults to 'entsoe SO&AF')
Returns
-------
df : pd.DataFrame
Capacity statistics per country and fuel-type
"""
if config is None:
config = get_config()
df = pd.read_csv(get_raw_file("Capacity_stats", update, config), index_col=0)
if raw:
return df
countries = config["target_countries"]
df = (
df.query("source == @source & year == @year")
.rename(columns={"technology": "Fueltype"})
.rename(columns=str.title)
.powerplant.convert_alpha2_to_country()
# .query('Country in @countries')
.replace(
dict(
Fueltype={
"Bioenergy and other renewable fuels": "Bioenergy",
"Bioenergy and renewable waste": "Waste",
"Coal derivatives": "Hard Coal",
"Differently categorized fossil fuels": "Other",
"Differently categorized renewable energy sources": "Other",
"Hard coal": "Hard Coal",
"Mixed fossil fuels": "Other",
"Natural gas": "Natural Gas",
"Other or unspecified energy sources": "Other",
"Tide, wave, and ocean": "Other",
}
)
)
.loc[lambda df: df.Fueltype.isin(config["target_fueltypes"])]
.pipe(set_column_name, source.title())
)
return df
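# Usage sketch (illustrative, not part of the module): aggregate the ENTSO-E
# SO&AF statistics for one year per country and fuel type. The variable name
# 'caps' is a placeholder.
#
#   caps = Capacity_stats(source="entsoe SO&AF", year=2016)
#   caps.groupby(["Country", "Fueltype"]).Capacity.sum()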
def GPD(raw=False, update=False, config=None, filter_other_dbs=True):
"""
Importer for the `Global Power Plant Database`.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
fn = get_raw_file("GPD", update, config)
key = "global_power_plant_database.csv"
with ZipFile(fn, "r") as file:
df = pd.read_csv(file.open(key), low_memory=False)
if raw:
return df
other_dbs = []
if filter_other_dbs:
other_dbs = ["GEODB", "Open Power System Data", "ENTSOE"]
countries = config["target_countries"]
return (
df.rename(columns=lambda x: x.title())
.query("Country_Long in @countries &" " Source not in @other_dbs")
.drop(columns="Country")
.rename(
columns={
"Gppd_Idnr": "projectID",
"Country_Long": "Country",
"Primary_Fuel": "Fueltype",
"Latitude": "lat",
"Longitude": "lon",
"Capacity_Mw": "Capacity",
# 'Source': 'File'
"Commissioning_Year": "DateIn",
}
)
.replace(
dict(
Fueltype={
"Coal": "Hard Coal",
"Biomass": "Bioenergy",
"Gas": "Natural Gas",
"Wave and Tidal": "Hydro",
}
)
)
.pipe(clean_powerplantname)
.pipe(set_column_name, "GPD")
.pipe(config_filter, name="GPD", config=config)
.pipe(gather_technology_info, config=config)
# .pipe(gather_set_info)
# .pipe(correct_manually, 'GPD', config=config)
)
# def WIKIPEDIA(raw=False):
# from bs4 import BeautifulSoup
#
# url = 'https://en.wikipedia.org/wiki/List_of_power_stations_in_Germany'
#
# dfs = pd.read_html(url, attrs={"class": ["wikitable","wikitable sortable"]})
# soup = BeautifulSoup(requests.get(url).text)
# all_headers = [h.text for h in soup.find_all("h2")]
# headers = [header[:-6] for header in all_headers if header[-6:] == '[edit]']
# headers = headers[:len(dfs)]
# df = pd.concat(dfs, keys=headers, axis=0, sort=True)
def ESE(raw=False, update=False, config=None):
"""
Importer for the ESE database.
This database is not given within the repository because of its
restrictive license.
Get it by clicking 'Export Data XLS' on https://goo.gl/gVMwKJ and
save the downloaded 'projects.xls' file in
/path/to/powerplantmatching/data/in/.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
df = pd.read_csv(get_raw_file("ESE", update, config), error_bad_lines=False)
if raw:
return df
target_countries = config["target_countries"]
return (
df.rename(columns=str.strip)
.rename(
columns={
"Title": "Name",
"Technology Mid-Type": "Technology",
"Longitude": "lon",
"Latitude": "lat",
"Technology Broad Category": "Fueltype",
}
)
.assign(
Set="Store",
projectID="ESE" + df.index.astype(str),
DateIn=lambda df: (
df["Commissioned"].str[-4:].apply(pd.to_numeric, errors="coerce")
),
Capacity=df["Rated Power"] / 1e3,
)
.query("Status == 'Operational' & Country in @target_countries")
.pipe(clean_powerplantname)
.pipe(clean_technology, generalize_hydros=True)
.replace(
dict(
Fueltype={
"Electro-chemical": "Battery",
"Pumped Hydro Storage": "Hydro",
}
)
)
.pipe(set_column_name, "ESE")
.pipe(config_filter, name="ESE", config=config)
# .pipe(correct_manually, 'ESE', config=config)
)
def ENTSOE(raw=False, update=False, config=None, entsoe_token=None):
"""
Importer for the list of installed generators provided by the ENTSO-E
Transparency Project. Geographical information is not given.
If update=True, the dataset is parsed through a request to
'https://transparency.entsoe.eu/generation/r2/\
installedCapacityPerProductionUnit/show',
Internet connection required. If raw=True, the same request is done, but
the unprocessed data is returned.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
entsoe_token: String
Security token of the ENTSO-E Transparency platform
Note: For obtaining a security token refer to section 2 of the
RESTful API documentation of the ENTSOE-E Transparency platform
https://transparency.entsoe.eu/content/static_content/Static%20content/
web%20api/Guide.html#_authentication_and_authorisation. Please save the
token in your config.yaml file (key 'entsoe_token').
"""
config = get_config() if config is None else config
def parse_entsoe(entsoe_token):
url = "https://transparency.entsoe.eu/api"
# retrieved from pd.read_html('https://transparency.entsoe.eu/content/stat
# ic_content/Static%20content/web%20api/Guide.html#_request_methods')[-1]
level1 = ["registeredResource.name", "registeredResource.mRID"]
level2 = ["voltage_PowerSystemResources.highVoltageLimit", "psrType"]
level3 = ["quantity"]
def namespace(element):
m = re.match(r"\{.*\}", element.tag)
return m.group(0) if m else ""
entsoe = pd.DataFrame()
logger.info(f"Retrieving data from {url}")
for domain in entsoe_api.mappings.Area:
ret = requests.get(
url,
params=dict(
securityToken=entsoe_token,
documentType="A71",
processType="A33",
In_Domain=domain,
periodStart="201912312300",
periodEnd="202012312300",
),
)
etree = ET.fromstring(ret.content)
ns = namespace(etree)
df_domain = pd.DataFrame(columns=level1 + level2 + level3 + ["Country"])
for i, level in enumerate([level1, level2, level3]):
for arg in level:
df_domain[arg] = [
e.text for e in etree.findall("*/" * (i + 1) + ns + arg)
]
entsoe = entsoe.append(df_domain, ignore_index=True)
return entsoe
path = get_raw_file("ENTSOE", config=config, skip_retrieve=True)
if os.path.exists(path) and not update:
df = pd.read_csv(path)
else:
token = config.get("entsoe_token")
if token is not None:
df = parse_entsoe(token)
df.to_csv(path)
else:
logger.info(
"No entsoe_token in config.yaml given, "
"falling back to stored version."
)
df = pd.read_csv(get_raw_file("ENTSOE", update, config))
if raw:
return df
fuelmap = entsoe_api.mappings.PSRTYPE_MAPPINGS
country_map_entsoe = (
pd.read_csv(_package_data("entsoe_country_codes.csv"), index_col=0)
.rename(index=str)
.Country
)
return (
df.rename(
columns={
"psrType": "Fueltype",
"quantity": "Capacity",
"registeredResource.mRID": "projectID",
"registeredResource.name": "Name",
}
)
.reindex(columns=config["target_columns"])
.replace({"Fueltype": fuelmap})
.drop_duplicates("projectID")
.assign(
EIC=lambda df: df.projectID,
Country=lambda df: df.projectID.str[:2].map(country_map_entsoe),
Name=lambda df: df.Name.str.title(),
Fueltype=lambda df: df.Fueltype.replace(
{
"Fossil Hard coal": "Hard Coal",
"Fossil Coal-derived gas": "Other",
".*Hydro.*": "Hydro",
".*Oil.*": "Oil",
".*Peat": "Bioenergy",
"Fossil Brown coal/Lignite": "Lignite",
"Biomass": "Bioenergy",
"Fossil Gas": "Natural Gas",
"Marine": "Other",
"Wind Offshore": "Offshore",
"Wind Onshore": "Onshore",
},
regex=True,
),
Capacity=lambda df: pd.to_numeric(df.Capacity),
)
.powerplant.convert_alpha2_to_country()
.pipe(clean_powerplantname)
.pipe(fill_geoposition, use_saved_locations=True, saved_only=True)
.query("Capacity > 0")
.pipe(gather_technology_info, config=config)
.pipe(gather_set_info)
.pipe(clean_technology)
.pipe(set_column_name, "ENTSOE")
.pipe(config_filter, name="ENTSOE", config=config)
.pipe(correct_manually, "ENTSOE", config=config)
)
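# Usage sketch (illustrative): fetch the installed-capacity list with a stored
# security token. 'MY_TOKEN' is a placeholder; without a token the function
# falls back to the shipped CSV, as described in the docstring above.
#
#   cfg = get_config()
#   cfg["entsoe_token"] = "MY_TOKEN"
#   entsoe_plants = ENTSOE(update=True, config=cfg)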
# def OSM():
# """
# Parser and Importer for Open Street Map power plant data.
# """
# import requests
# overpass_url = "http://overpass-api.de/api/interpreter"
# overpass_query = """
# [out:json][timeout:210];
# area["name"="Luxembourg"]->.boundaryarea;
# (
# // query part for: “power=plant”
# node["power"="plant"](area.boundaryarea);
# way["power"="plant"](area.boundaryarea);
# relation["power"="plant"](area.boundaryarea);
# node["power"="generator"](area.boundaryarea);
# way["power"="generator"](area.boundaryarea);
# relation["power"="generator"](area.boundaryarea);
# );
# out body;
# """
# response = requests.get(overpass_url,
# params={'data': overpass_query})
# data = response.json()
# df = pd.DataFrame(data['elements'])
# df = pd.concat([df.drop(columns='tags'), df.tags.apply(pd.Series)], axis=1)
#
@deprecated(
deprecated_in="0.4.9",
removed_in="0.5.0",
details="This function is not maintained anymore.",
)
def WEPP(raw=False, config=None):
"""
    Importer for the standardized WEPP (Platts, World Electric Power
Plants Database). This database is not provided by this repository because
of its restrictive licence.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
# Define the appropriate datatype for each column (some columns e.g.
# 'YEAR' cannot be integers, as there are N/A values, which np.int
# does not yet(?) support.)
datatypes = {
"UNIT": str,
"PLANT": str,
"COMPANY": str,
"MW": np.float64,
"STATUS": str,
"YEAR": np.float64,
"UTYPE": str,
"FUEL": str,
"FUELTYPE": str,
"ALTFUEL": str,
"SSSMFR": str,
"BOILTYPE": str,
"TURBMFR": str,
"TURBTYPE": str,
"GENMFR": str,
"GENTYPE": str,
"SFLOW": np.float64,
"SPRESS": np.float64,
"STYPE": str,
"STEMP": np.float64,
"REHEAT1": np.float64,
"REHEAT2": np.float64,
"PARTCTL": str,
"PARTMFR": str,
"SO2CTL": str,
"FGDMFR": str,
"NOXCTL": str,
"NOXMFR": str,
"AE": str,
"CONstr, UCT": str,
"COOL": str,
"RETIRE": np.float64,
"CITY": str,
"STATE": str,
"COUNTRY": str,
"AREA": str,
"SUBREGION": str,
"POSTCODE": str,
"PARENT": str,
"ELECTYPE": str,
"BUSTYPE": str,
"COMPID": str,
"LOCATIONID": str,
"UNITID": str,
}
# Now read the Platts WEPP Database
wepp = pd.read_csv(config["WEPP"]["source_file"], dtype=datatypes, encoding="utf-8")
if raw:
return wepp
# Fit WEPP-column names to our specifications
wepp.columns = wepp.columns.str.title()
wepp.rename(
columns={
"Unit": "Name",
"Fuel": "Fueltype",
"Fueltype": "Technology",
"Mw": "Capacity",
"Year": "DateIn",
"Retire": "DateOut",
"Lat": "lat",
"Lon": "lon",
"Unitid": "projectID",
},
inplace=True,
)
wepp.loc[:, "DateRetrofit"] = wepp.DateIn
# Do country transformations and drop those which are not in defined scope
c = {
"ENGLAND & WALES": "UNITED KINGDOM",
"GIBRALTAR": "SPAIN",
"SCOTLAND": "UNITED KINGDOM",
}
wepp.Country = wepp.Country.replace(c).str.title()
wepp = (
wepp.loc[lambda df: df.Country.isin(config["target_countries"])]
.loc[lambda df: df.Status.isin(["OPR", "CON"])]
.assign(File=config["WEPP"]["source_file"])
)
# Replace fueltypes
d = {
"AGAS": "Bioenergy", # Syngas from gasified agricultural waste
"BFG": "Other", # blast furnance gas -> "Hochofengas"
"BGAS": "Bioenergy",
"BIOMASS": "Bioenergy",
"BL": "Bioenergy",
"CGAS": "Hard Coal",
"COAL": "Hard Coal",
"COG": "Other", # coke oven gas -> deutsch: "Hochofengas"
"COKE": "Hard Coal",
"CSGAS": "Hard Coal", # Coal-seam-gas
"CWM": "Hard Coal", # Coal-water mixture (aka coal-water slurry)
"DGAS": "Other", # sewage digester gas -> deutsch: "Klaergas"
"FGAS": "Other", # Flare gas or wellhead gas or associated gas
"GAS": "Natural Gas",
"GEO": "Geothermal",
"H2": "Other", # Hydrogen gas
"HZDWST": "Waste", # Hazardous waste
"INDWST": "Waste", # Industrial waste or refinery waste
"JET": "Oil", # Jet fuels
"KERO": "Oil", # Kerosene
"LGAS": "Other", # landfill gas -> deutsch: "Deponiegas"
"LIGNIN": "Bioenergy",
"LIQ": "Other", # (black) liqour -> deutsch: "Schwarzlauge",
# die bei Papierherstellung anfaellt
"LNG": "Natural Gas", # Liquified natural gas
"LPG": "Natural Gas", # Liquified petroleum gas (u. butane/propane)
"MBM": "Bioenergy", # Meat and bonemeal
"MEDWST": "Bioenergy", # Medical waste
"MGAS": "Other", # mine gas -> deutsch: "Grubengas"
"NAP": "Oil", # naphta
"OGAS": "Oil", # Gasified crude oil/refinery bottoms/bitumen
"PEAT": "Other",
"REF": "Waste",
"REFGAS": "Other", # Syngas from gasified refuse
"RPF": "Waste", # Waste paper and/or waste plastic
"PWST": "Other", # paper mill waste
"RGAS": "Other", # refinery off-gas -> deutsch: "Raffineriegas"
"SHALE": "Oil",
"SUN": "Solar",
"TGAS": "Other", # top gas -> deutsch: "Hochofengas"
"TIRES": "Other", # Scrap tires
"UNK": "Other",
"UR": "Nuclear",
"WAT": "Hydro",
"WOOD": "Bioenergy",
"WOODGAS": "Bioenergy",
"WSTGAS": "Other", # waste gas -> deutsch: "Industrieabgas"
"WSTWSL": "Waste", # Wastewater sludge
"WSTH": "Waste",
}
wepp.Fueltype = wepp.Fueltype.replace(d)
# Fill NaNs to allow str actions
wepp.Technology.fillna("", inplace=True)
wepp.Turbtype.fillna("", inplace=True)
# Correct technology infos:
wepp.loc[wepp.Technology.str.contains("LIG", case=False), "Fueltype"] = "Lignite"
wepp.loc[
wepp.Turbtype.str.contains("KAPLAN|BULB", case=False), "Technology"
] = "Run-Of-River"
wepp.Technology = wepp.Technology.replace(
{"CONV/PS": "Pumped Storage", "CONV": "Reservoir", "PS": "Pumped Storage"}
)
tech_st_pattern = [
"ANTH",
"BINARY",
"BIT",
"BIT/ANTH",
"BIT/LIG",
"BIT/SUB",
"BIT/SUB/LIG",
"COL",
"DRY ST",
"HFO",
"LIG",
"LIG/BIT",
"PWR",
"RDF",
"SUB",
]
tech_ocgt_pattern = ["AGWST", "LITTER", "RESID", "RICE", "STRAW"]
tech_ccgt_pattern = ["LFO"]
wepp.loc[wepp.Technology.isin(tech_st_pattern), "Technology"] = "Steam Turbine"
wepp.loc[wepp.Technology.isin(tech_ocgt_pattern), "Technology"] = "OCGT"
wepp.loc[wepp.Technology.isin(tech_ccgt_pattern), "Technology"] = "CCGT"
ut_ccgt_pattern = [
"CC",
"GT/C",
"GT/CP",
"GT/CS",
"GT/ST",
"ST/C",
"ST/CC/GT",
"ST/CD",
"ST/CP",
"ST/CS",
"ST/GT",
"ST/GT/IC",
"ST/T",
"IC/CD",
"IC/CP",
"IC/GT",
]
ut_ocgt_pattern = ["GT", "GT/D", "GT/H", "GT/HY", "GT/IC", "GT/S", "GT/T", "GTC"]
ut_st_pattern = ["ST", "ST/D"]
ut_ic_pattern = ["IC", "IC/H"]
wepp.loc[wepp.Utype.isin(ut_ccgt_pattern), "Technology"] = "CCGT"
wepp.loc[wepp.Utype.isin(ut_ocgt_pattern), "Technology"] = "OCGT"
wepp.loc[wepp.Utype.isin(ut_st_pattern), "Technology"] = "Steam Turbine"
wepp.loc[wepp.Utype.isin(ut_ic_pattern), "Technology"] = "Combustion Engine"
wepp.loc[wepp.Utype == "WTG", "Technology"] = "Onshore"
wepp.loc[wepp.Utype == "WTG/O", "Technology"] = "Offshore"
wepp.loc[
(wepp.Fueltype == "Solar") & (wepp.Utype.isin(ut_st_pattern)), "Technology"
] = "CSP"
# Derive the SET column
chp_pattern = [
"CC/S",
"CC/CP",
"CCSS/P",
"GT/CP",
"GT/CS",
"GT/S",
"GT/H",
"IC/CP",
"IC/H",
"ST/S",
"ST/H",
"ST/CP",
"ST/CS",
"ST/D",
]
wepp.loc[wepp.Utype.isin(chp_pattern), "Set"] = "CHP"
wepp.loc[wepp.Set.isnull(), "Set"] = "PP"
# Clean up the mess
wepp.Fueltype = wepp.Fueltype.str.title()
wepp.loc[wepp.Technology.str.len() > 4, "Technology"] = wepp.loc[
wepp.Technology.str.len() > 4, "Technology"
].str.title()
# Done!
wepp.datasetID = "WEPP"
return (
wepp.pipe(set_column_name, "WEPP")
.pipe(config_filter, name="WEPP", config=config)
.pipe(scale_to_net_capacities, (not config["WEPP"]["net_capacity"]))
.pipe(correct_manually, "WEPP", config=config)
)
def UBA(
raw=False,
update=False,
config=None,
header=9,
skipfooter=26,
prune_wind=True,
prune_solar=True,
):
"""
Importer for the UBA Database. Please download the data from
`<https://www.umweltbundesamt.de/dokument/datenbank-kraftwerke-in
-deutschland>`_.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
header : int, Default 9
The zero-indexed row in which the column headings are found.
skipfooter : int, Default 26
"""
config = get_config() if config is None else config
fn = get_raw_file("UBA", update, config)
uba = pd.read_excel(fn, skipfooter=skipfooter, na_values="n.b.", header=header)
if raw:
return uba
uba = uba.rename(
columns={
"Kraftwerksname / Standort": "Name",
"Elektrische Bruttoleistung (MW)": "Capacity",
"Inbetriebnahme (ggf. Ertüchtigung)": "DateIn",
"Primärenergieträger": "Fueltype",
"Anlagenart": "Technology",
"Fernwärme-leistung (MW)": "CHP",
"Standort-PLZ": "PLZ",
}
)
from .heuristics import PLZ_to_LatLon_map
uba = uba.assign(
Name=uba.Name.replace({r"\s\s+": " "}, regex=True),
lon=uba.PLZ.map(PLZ_to_LatLon_map()["lon"]),
lat=uba.PLZ.map(PLZ_to_LatLon_map()["lat"]),
DateIn=uba.DateIn.str.replace(r"\(|\)|\/|\-", " ")
.str.split(" ")
.str[0]
.astype(float),
Country="Germany",
File="kraftwerke-de-ab-100-mw.xls",
projectID=["UBA{:03d}".format(i + header + 2) for i in uba.index],
Technology=uba.Technology.replace(
{
"DKW": "Steam Turbine",
"DWR": "Pressurized Water Reactor",
"G/AK": "Steam Turbine",
"GT": "OCGT",
"GuD": "CCGT",
"GuD / HKW": "CCGT",
"HKW": "Steam Turbine",
"HKW (DT)": "Steam Turbine",
"HKW / GuD": "CCGT",
"HKW / SSA": "Steam Turbine",
"IKW": "OCGT",
"IKW / GuD": "CCGT",
"IKW / HKW": "Steam Turbine",
"IKW / HKW / GuD": "CCGT",
"IKW / SSA": "OCGT",
"IKW /GuD": "CCGT",
"LWK": "Run-Of-River",
"PSW": "Pumped Storage",
"SWK": "Reservoir Storage",
"SWR": "Boiled Water Reactor",
}
),
)
uba.loc[uba.CHP.notnull(), "Set"] = "CHP"
uba = uba.pipe(gather_set_info)
uba.loc[uba.Fueltype == "Wind (O)", "Technology"] = "Offshore"
uba.loc[uba.Fueltype == "Wind (L)", "Technology"] = "Onshore"
uba.loc[uba.Fueltype.str.contains("Wind"), "Fueltype"] = "Wind"
uba.loc[uba.Fueltype.str.contains("Braunkohle"), "Fueltype"] = "Lignite"
uba.loc[uba.Fueltype.str.contains("Steinkohle"), "Fueltype"] = "Hard Coal"
uba.loc[uba.Fueltype.str.contains("Erdgas"), "Fueltype"] = "Natural Gas"
uba.loc[uba.Fueltype.str.contains("HEL"), "Fueltype"] = "Oil"
uba.Fueltype = uba.Fueltype.replace(
{
"Biomasse": "Bioenergy",
"Gichtgas": "Other",
"HS": "Oil",
"Konvertergas": "Other",
"Licht": "Solar",
"Raffineriegas": "Other",
"Uran": "Nuclear",
"Wasser": "Hydro",
"\xd6lr\xfcckstand": "Oil",
}
)
uba.Name.replace([r"(?i)oe", r"(?i)ue"], ["ö", "ü"], regex=True, inplace=True)
if prune_wind:
uba = uba.loc[lambda x: x.Fueltype != "Wind"]
if prune_solar:
uba = uba.loc[lambda x: x.Fueltype != "Solar"]
return (
uba.pipe(set_column_name, "UBA").pipe(
scale_to_net_capacities, not config["UBA"]["net_capacity"]
)
# .pipe(config_filter, name='UBA', config=config)
# .pipe(correct_manually, 'UBA', config=config)
)
def BNETZA(
raw=False,
update=False,
config=None,
header=9,
sheet_name="Gesamtkraftwerksliste BNetzA",
prune_wind=True,
prune_solar=True,
):
"""
Importer for the database put together by Germany's 'Federal Network
Agency' (dt. 'Bundesnetzagentur' (BNetzA)).
Please download the data from
`<https://www.bundesnetzagentur.de/DE/Sachgebiete/ElektrizitaetundGas/
Unternehmen_Institutionen/Versorgungssicherheit/Erzeugungskapazitaeten/
Kraftwerksliste/kraftwerksliste-node.html>`_.
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
header : int, Default 9
The zero-indexed row in which the column headings are found.
"""
config = get_config() if config is None else config
fn = get_raw_file("BNETZA", update, config)
bnetza = pd.read_excel(fn, header=header, sheet_name=sheet_name, parse_dates=False)
if raw:
return bnetza
bnetza = bnetza.rename(
columns={
"Kraftwerksnummer Bundesnetzagentur": "projectID",
"Kraftwerksname": "Name",
"Netto-Nennleistung (elektrische Wirkleistung) in MW": "Capacity",
"Wärmeauskopplung (KWK)\n(ja/nein)": "Set",
"Ort\n(Standort Kraftwerk)": "Ort",
(
"Auswertung\nEnergieträger (Zuordnung zu einem "
"Hauptenergieträger bei Mehreren Energieträgern)"
): "Fueltype",
"Kraftwerksstatus \n(in Betrieb/\nvorläufig "
"stillgelegt/\nsaisonale Konservierung\nNetzreserve/ "
"Sicherheitsbereitschaft/\nSonderfall)": "Status",
(
"Aufnahme der kommerziellen Stromerzeugung der derzeit "
"in Betrieb befindlichen Erzeugungseinheit\n(Datum/Jahr)"
): "DateIn",
"PLZ\n(Standort Kraftwerk)": "PLZ",
}
)
    # If the BNetzA name is empty, fall back to the company name; if that is
    # empty too, use the city.
from .heuristics import PLZ_to_LatLon_map
pattern = "|".join(
[
".*(?i)betrieb",
".*(?i)gehindert",
"(?i)vorläufig.*",
"Sicherheitsbereitschaft",
"Sonderfall",
]
)
bnetza = bnetza.assign(
lon=bnetza.PLZ.map(PLZ_to_LatLon_map()["lon"]),
lat=bnetza.PLZ.map(PLZ_to_LatLon_map()["lat"]),
Name=bnetza.Name.where(
bnetza.Name.str.len().fillna(0) > 4,
bnetza.Unternehmen + " " + bnetza.Name.fillna(""),
)
.fillna(bnetza.Ort)
.str.strip(),
DateIn=bnetza.DateIn.str[:4].apply(pd.to_numeric, errors="coerce"),
Blockname=bnetza.Blockname.replace(
{
".*(GT|gasturbine).*": "OCGT",
".*(DT|HKW|(?i)dampfturbine|(?i)heizkraftwerk).*": "Steam Turbine",
".*GuD.*": "CCGT",
},
regex=True,
),
)[
lambda df: df.projectID.notna()
& df.Status.str.contains(pattern, regex=True, case=False)
].pipe(
gather_technology_info,
search_col=["Name", "Fueltype", "Blockname"],
config=config,
)
add_location_b = bnetza[bnetza.Ort.notnull()].apply(
lambda ds: (ds["Ort"] not in ds["Name"])
and (str.title(ds["Ort"]) not in ds["Name"]),
axis=1,
)
bnetza.loc[bnetza.Ort.notnull() & add_location_b, "Name"] = (
bnetza.loc[bnetza.Ort.notnull() & add_location_b, "Ort"]
+ " "
+ bnetza.loc[bnetza.Ort.notnull() & add_location_b, "Name"]
)
techmap = {
"solare": "PV",
"Laufwasser": "Run-Of-River",
"Speicherwasser": "Reservoir",
"Pumpspeicher": "Pumped Storage",
}
for fuel in techmap:
bnetza.loc[
bnetza.Fueltype.str.contains(fuel, case=False), "Technology"
] = techmap[fuel]
# Fueltypes
bnetza.Fueltype.replace(
{
"Erdgas": "Natural Gas",
"Steinkohle": "Hard Coal",
"Braunkohle": "Lignite",
"Wind.*": "Wind",
"Solar.*": "Solar",
".*(?i)energietr.*ger.*\n.*": "Other",
"Kern.*": "Nuclear",
"Mineral.l.*": "Oil",
"Biom.*": "Bioenergy",
".*(?i)(e|r|n)gas": "Other",
"Geoth.*": "Geothermal",
"Abfall": "Waste",
".*wasser.*": "Hydro",
".*solar.*": "PV",
},
regex=True,
inplace=True,
)
if prune_wind:
bnetza = bnetza[lambda x: x.Fueltype != "Wind"]
if prune_solar:
bnetza = bnetza[lambda x: x.Fueltype != "Solar"]
# Filter by country
bnetza = bnetza[~bnetza.Bundesland.isin(["Österreich", "Schweiz", "Luxemburg"])]
return (
bnetza.assign(
Country="Germany",
Set=bnetza.Set.fillna("Nein")
.str.title()
.replace({"Ja": "CHP", "Nein": "PP"}),
).pipe(set_column_name, "BNETZA")
# .pipe(config_filter, name='BNETZA', config=config)
# .pipe(correct_manually, 'BNETZA', config=config)
)
def OPSD_VRE(raw=False, update=False, config=None):
"""
Importer for the OPSD (Open Power Systems Data) renewables (VRE)
database.
This sqlite database is very big and hence not part of the package.
It needs to be obtained from
`<http://data.open-power-system-data.org/renewable_power_plants/>`_
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
df = pd.read_csv(get_raw_file("OPSD_VRE"), index_col=0, low_memory=False)
if raw:
return df
return (
df.rename(
columns={
"energy_source_level_2": "Fueltype",
"technology": "Technology",
"data_source": "file",
"country": "Country",
"electrical_capacity": "Capacity",
"municipality": "Name",
}
)
.assign(DateIn=lambda df: df.commissioning_date.str[:4].astype(float), Set="PP")
.powerplant.convert_alpha2_to_country()
.pipe(set_column_name, "OPSD_VRE")
.pipe(config_filter, config=config)
.drop("Name", axis=1)
)
def OPSD_VRE_country(country, raw=False, update=False, config=None):
"""
    Get country specific data from OPSD for renewables, if available.
Available for DE, FR, PL, CH, DK, CZ and SE (last update: 09/2020).
Parameters
----------
raw : Boolean, default False
Whether to return the original dataset
update: bool, default False
Whether to update the data from the url.
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
config = get_config() if config is None else config
# there is a problem with GB in line 1651 (version 20/08/20) use low_memory
df = pd.read_csv(get_raw_file(f"OPSD_VRE_{country}"), low_memory=False)
if raw:
return df
return (
df.assign(Country=country, Set="PP")
.rename(
columns={
"energy_source_level_2": "Fueltype",
"technology": "Technology",
"data_source": "file",
"electrical_capacity": "Capacity",
"municipality": "Name",
}
)
# there is a problem with GB in line 1651 (version 20/08/20)
.assign(Capacity=lambda df: pd.to_numeric(df.Capacity, "coerce"))
.powerplant.convert_alpha2_to_country()
.pipe(config_filter, config=config)
.drop("Name", axis=1)
)
@deprecated(
deprecated_in="0.4.9",
removed_in="0.5.0",
details="Removed since data is not publicly available anymore",
)
def IRENA_stats(config=None):
"""
Reads the IRENA Capacity Statistics 2017 Database
Parameters
----------
config : dict, default None
Add custom specific configuration,
e.g. powerplantmatching.config.get_config(target_countries='Italy'),
defaults to powerplantmatching.config.get_config()
"""
if config is None:
config = get_config()
# Read the raw dataset
df = pd.read_csv(_data_in("IRENA_CapacityStatistics2017.csv"), encoding="utf-8")
# "Unpivot"
df = pd.melt(
df,
id_vars=["Indicator", "Technology", "Country"],
var_name="Year",
value_vars=[str(i) for i in range(2000, 2017, 1)],
value_name="Capacity",
)
# Drop empty
df.dropna(axis=0, subset=["Capacity"], inplace=True)
# Drop generations
df = df[df.Indicator == "Electricity capacity (MW)"]
df.drop("Indicator", axis=1, inplace=True)
# Drop countries out of scope
df.Country.replace(
{"Czechia": "Czech Republic", "UK": "United Kingdom"}, inplace=True
)
df = df.loc[lambda df: df.Country.isin(config["target_countries"])]
# Convert to numeric
df.Year = df.Year.astype(int)
df.Capacity = df.Capacity.str.strip().str.replace(" ", "").astype(float)
# Handle Fueltypes and Technologies
d = {
"Bagasse": "Bioenergy",
"Biogas": "Bioenergy",
"Concentrated solar power": "Solar",
"Geothermal": "Geothermal",
"Hydro 1-10 MW": "Hydro",
"Hydro 10+ MW": "Hydro",
"Hydro <1 MW": "Hydro",
"Liquid biofuels": "Bioenergy",
"Marine": "Hydro",
"Mixed and pumped storage": "Hydro",
"Offshore wind energy": "Wind",
"Onshore wind energy": "Wind",
"Other solid biofuels": "Bioenergy",
"Renewable municipal waste": "Waste",
"Solar photovoltaic": "Solar",
}
df.loc[:, "Fueltype"] = df.Technology.map(d)
# df = df.loc[lambda df: df.Fueltype.isin(config['target_fueltypes'])]
d = {
"Concentrated solar power": "CSP",
"Solar photovoltaic": "PV",
"Onshore wind energy": "Onshore",
"Offshore wind energy": "Offshore",
}
df.Technology.replace(d, inplace=True)
df.loc[:, "Set"] = "PP"
return df.reset_index(drop=True).pipe(set_column_name, "IRENA Statistics")
|
FRESNA/powerplantmatching
|
powerplantmatching/data.py
|
Python
|
gpl-3.0
| 53,638
|
[
"BLAST"
] |
36b564b75d5450310528bd59007af3c57023fa932ca26791e484f4cc0840c3e7
|
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the Smiles file handling stuff
"""
import unittest,sys,os
from rdkit import RDConfig
from rdkit import Chem
from rdkit.six import next
class TestCase(unittest.TestCase):
def setUp(self):
self.smis = ['CC','CCC','CCCCC','CCCCCC','CCCCCCC','CC','CCCCOC']
def test1LazyReader(self):
" tests lazy reads """
supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis),',',0,-1,0)
for i in range(4):
m = next(supp)
assert m,'read %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
i = len(supp)-1
m = supp[i]
assert m,'read %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
ms = [x for x in supp]
for i in range(len(supp)):
m = ms[i]
if m:
ms[i] = Chem.MolToSmiles(m)
l = len(supp)
assert l == len(self.smis),'bad supplier length: %d'%(l)
i = len(self.smis)-3
m = supp[i-1]
assert m,'back index %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
with self.assertRaisesRegexp(Exception, ""):
m = supp[len(self.smis)] # out of bound read must fail
def test2LazyIter(self):
" tests lazy reads using the iterator interface "
supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis),',',0,-1,0)
nDone = 0
for mol in supp:
assert mol,'read %d failed'%nDone
assert mol.GetNumAtoms(),'no atoms in mol %d'%nDone
nDone += 1
assert nDone==len(self.smis),'bad number of molecules'
l = len(supp)
assert l == len(self.smis),'bad supplier length: %d'%(l)
i = len(self.smis)-3
m = supp[i-1]
assert m,'back index %d failed'%i
assert m.GetNumAtoms(),'no atoms in mol %d'%i
with self.assertRaisesRegexp(Exception, ""):
      m = supp[len(self.smis)]  # out of bound read must fail
def test3BoundaryConditions(self):
smis = ['CC','CCOC','fail','CCO']
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
self.assertEqual(len(supp), 4)
self.assertIs(supp[2], None)
self.assertTrue(supp[3])
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
self.assertIs(supp[2], None)
self.assertTrue(supp[3])
self.assertEqual(len(supp), 4)
with self.assertRaisesRegexp(Exception, ""):
supp[4]
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
self.assertEqual(len(supp), 4)
self.assertTrue(supp[3])
with self.assertRaisesRegexp(Exception, ""):
supp[4]
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis),',',0,-1,0)
with self.assertRaisesRegexp(Exception, ""):
supp[4]
self.assertEqual(len(supp), 4)
self.assertTrue(supp[3])
if __name__ == '__main__':
unittest.main()
|
adalke/rdkit
|
rdkit/Chem/Suppliers/UnitTestSmilesMolSupplier.py
|
Python
|
bsd-3-clause
| 3,033
|
[
"RDKit"
] |
ba1d36b979bd81fe3cd661e4976fffbff4c00ab979d9590b2a0cdd7634c97ee2
|
import numpy as np
__all__ = ["OneHotFeaturizer"]
class Featurizer(object):
"""
Abstract class for calculating a set of features for a molecule.
Child classes implement the _featurize method for calculating features
for a single molecule.
"""
def featurize(self, mols, verbose=True, log_every_n=1000):
"""
Calculate features for molecules.
Parameters
----------
mols : iterable
RDKit Mol objects.
"""
mols = list(mols)
features = []
for i, mol in enumerate(mols):
if mol is not None:
features.append(self._featurize(mol))
else:
features.append(np.array([]))
features = np.asarray(features)
return features
def _featurize(self, mol):
"""
Calculate features for a single molecule.
Parameters
----------
mol : RDKit Mol
Molecule.
"""
raise NotImplementedError('Featurizer is not defined.')
def __call__(self, mols):
"""
Calculate features for molecules.
Parameters
----------
mols : iterable
RDKit Mol objects.
"""
return self.featurize(mols)
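# Illustrative sketch (not part of the original module): a minimal Featurizer
# subclass showing the intended extension point. It assumes the inputs passed
# to featurize() are RDKit Mol objects, as documented above.
class _AtomCountFeaturizer(Featurizer):
  """Toy featurizer that returns the atom count of each molecule."""

  def _featurize(self, mol):
    # GetNumAtoms() is a method of RDKit Mol objects; the result is wrapped
    # in an array so featurize() can stack the per-molecule feature vectors.
    return np.array([mol.GetNumAtoms()])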
class OneHotFeaturizer(Featurizer):
"""
NOTE(LESWING) Not Thread Safe in initialization of charset
"""
def __init__(self, charset=None, padlength=120):
"""
Parameters
----------
charset: obj:`list` of obj:`str`
Each string is length 1
padlength: int
length to pad the smile strings to
"""
self.charset = charset
self.pad_length = padlength
def featurize(self, smiles, verbose=True, log_every_n=1000):
"""
Parameters
----------
    smiles: obj:`list` of obj:`str`
      SMILES strings to featurize
verbose: bool
How much logging
log_every_n:
How often to log
Returns
-------
obj
numpy array of features
"""
if self.charset is None:
self.charset = self._create_charset(smiles)
return np.array([self.one_hot_encoded(smile) for smile in smiles])
def one_hot_array(self, i):
"""
Create a one hot array with bit i set to 1
Parameters
----------
i: int
bit to set to 1
Returns
-------
obj:`list` of obj:`int`
length len(self.charset)
"""
return [int(x) for x in [ix == i for ix in range(len(self.charset))]]
def one_hot_index(self, c):
"""
TODO(LESWING) replace with map lookup vs linear scan
Parameters
----------
c
character whose index we want
Returns
-------
int
index of c in self.charset
"""
return self.charset.index(c)
def pad_smile(self, smile):
"""
Pad A Smile String to self.pad_length
Parameters
----------
smile: str
Returns
-------
str
smile string space padded to self.pad_length
"""
return smile.ljust(self.pad_length)
def one_hot_encoded(self, smile):
"""
One Hot Encode an entire SMILE string
Parameters
----------
smile: str
smile string to encode
Returns
-------
object
np.array of one hot encoded arrays for each character in smile
"""
return np.array([
self.one_hot_array(self.one_hot_index(x)) for x in self.pad_smile(smile)
])
def untransform(self, z):
"""
Convert from one hot representation back to SMILE
Parameters
----------
z: obj:`list`
list of one hot encoded features
Returns
-------
Smile Strings picking MAX for each one hot encoded array
"""
z1 = []
for i in range(len(z)):
s = ""
for j in range(len(z[i])):
oh = np.argmax(z[i][j])
s += self.charset[oh]
z1.append([s.strip()])
return z1
def _create_charset(self, smiles):
"""
create the charset from smiles
Parameters
----------
smiles: obj:`list` of obj:`str`
list of smile strings
Returns
-------
obj:`list` of obj:`str`
List of length one strings that are characters in smiles. No duplicates
"""
s = set()
for smile in smiles:
for c in smile:
s.add(c)
return [' '] + sorted(list(s))
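# Usage sketch (illustrative): one-hot encode a few SMILES strings and decode
# them back. The inputs are placeholders; when no charset is supplied it is
# inferred from the given strings.
#
#   featurizer = OneHotFeaturizer(padlength=120)
#   one_hot = featurizer.featurize(["CCO", "c1ccccc1"])  # shape (2, 120, n_chars)
#   decoded = featurizer.untransform(one_hot)            # [['CCO'], ['c1ccccc1']]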
|
cxhernandez/molencoder
|
molencoder/featurizers.py
|
Python
|
mit
| 4,722
|
[
"RDKit"
] |
25c223e958bda1580a09f9542f70b28340e02a57e70a3bfab489011772b1439e
|
import ast
import unittest
import mnfy
class FunctionToLambdaTests(unittest.TestCase):
def setUp(self):
self.transform = mnfy.FunctionToLambda()
def _test_failure(self, fxn_code):
fxn_ast = ast.parse(fxn_code)
new_ast = self.transform.visit(fxn_ast)
new_fxn = new_ast.body[0]
self.assertIsInstance(new_fxn, ast.FunctionDef,
'{} not an instance of ast.FunctionDef'.format(new_ast.__class__))
def test_decorator_fail(self):
self._test_failure('@dec\ndef X(): return')
def test_returns_annotation_fail(self):
self._test_failure('def X()->None: return')
def test_body_too_long_fail(self):
self._test_failure('def X(): x = 2 + 3; return x')
def test_body_not_return_fail(self):
self._test_failure('def X(): Y()')
def test_no_vararg_annotation_fail(self):
self._test_failure('def X(*arg:None): return')
def test_no_kwarg_annotation_fail(self):
self._test_failure('def X(**kwargs:None): return')
def test_no_arg_annotation_fail(self):
self._test_failure('def X(a, b:None, c): return')
def test_success(self):
module = ast.parse('def identity(): return 42')
fxn = module.body[0]
new_ast = self.transform.visit(module)
assign = new_ast.body[0]
self.assertIsInstance(assign, ast.Assign)
self.assertEqual(len(assign.targets), 1)
target = assign.targets[0]
self.assertIsInstance(target, ast.Name)
self.assertEqual(target.id, 'identity')
self.assertIsInstance(target.ctx, ast.Store)
lmda = assign.value
self.assertIsInstance(lmda, ast.Lambda)
self.assertIs(lmda.args, fxn.args)
self.assertIs(lmda.body, fxn.body[0].value)
def test_return_None(self):
# If a function has a bare return then the lambda should return None.
module = ast.parse('def fxn(): return')
new_ast = self.transform.visit(module)
lambda_ = new_ast.body[0].value
self.assertIsInstance(lambda_.body, ast.Name)
self.assertEqual(lambda_.body.id, 'None')
self.assertIsInstance(lambda_.body.ctx, ast.Load)
    @unittest.skip('not implemented')
def test_empty_return(self):
pass
if __name__ == '__main__':
unittest.main()
|
brettcannon/mnfy
|
tests/test_unsafe_transforms.py
|
Python
|
apache-2.0
| 2,323
|
[
"VisIt"
] |
9863c3f3bd72b5ddfe4c728f24be57086f5bd4d6474a4153c1348e85bcecc3af
|
""" Unit tests for vector functions """
import unittest
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import json
import picogeojson
from test_helper import TESTDATA
import karta.vector as vector
import karta.vector._geojson as geojson
from karta.vector.geometry import Point, Line, Polygon, Multipoint, Multiline, Multipolygon
from karta.crs import LonLatWGS84, WebMercator, Cartesian
class TestGeoJSON(unittest.TestCase):
def test_read_scalar_properties(self):
path = os.path.join(TESTDATA, "geojson_input/california.geojson")
geoms = vector.read_geojson(path)
self.assertEqual(geoms[0].properties, {'featurecla': 'Land', 'scalerank': 0})
return
def test_geometrycollection2geometry(self):
path = os.path.join(TESTDATA, "geojson_input/geometrycollection.json")
geoms = vector.read_geojson(path)
self.assertEqual(len(geoms), 2)
self.assertTrue(isinstance(geoms[0], vector.Point))
self.assertTrue(isinstance(geoms[1], vector.Line))
return
def test_featurecollection2geometry(self):
path = os.path.join(TESTDATA, "geojson_input/featurecollection.json")
features = vector.read_geojson(path)
ans0 = Point((102.0, 0.5), properties={"prop0":"value0"}, crs=LonLatWGS84)
self.assertEqual(features[0], ans0)
ans1 = Line([(102.0, 0.0), (103.0, 1.0), (104.0, 0.0), (105.0, 1.0)],
properties={"prop0":"value0", "prop1":0.0}, crs=LonLatWGS84)
self.assertEqual(features[1], ans1)
ans2 = Polygon([(100.0, 0.0), (101.0, 0.0), (101.0, 1.0), (100.0, 1.0),
(100.0, 0.0)],
properties={"prop0":"value0", "prop1":{"this":"that"}},
crs=LonLatWGS84)
self.assertEqual(features[2], ans2)
return
def test_read_capitols(self):
path = os.path.join(TESTDATA, "geojson_input/us-capitols.json")
features = vector.read_geojson(path)
names = ['Phoenix, Arizona, United States', 'Sacramento, California, United States',
'Atlanta, Georgia, United States', 'Indianapolis, Indiana, United States',
'Helena, Montana, United States', 'Columbus, Ohio, United States',
'Richmond, Virginia, United States', 'Topeka, Kansas, United States',
'Boston, Massachusetts, United States', 'Lincoln, Nebraska, United States',
'Oklahoma City, Oklahoma, United States', 'Juneau, Alaska, United States',
'Pierre, South Dakota, United States', 'Honolulu, Hawaii, United States',
'Montgomery, Alabama, United States',
'Little Rock, Arkansas, United States', 'Denver, Colorado, United States',
'Hartford, Connecticut, United States', 'Dover, Delaware, United States',
'Washington, District of Columbia, United States',
'Tallahassee, Florida, United States', 'Boise, Idaho, United States',
'Springfield, Illinois, United States', 'Des Moines, Iowa, United States',
'Frankfort, Kentucky, United States',
'Baton Rouge, Louisiana, United States', 'Augusta, Maine, United States',
'Annapolis, Maryland, United States', 'Lansing, Michigan, United States',
'Saint Paul, Minnesota, United States',
'Jackson, Mississippi, United States',
'Jefferson City, Missouri, United States',
'Carson City, Nevada, United States',
'Concord, New Hampshire, United States',
'Trenton, New Jersey, United States',
'Santa Fe, New Mexico, United States', 'Albany, New York, United States',
'Raleigh, North Carolina, United States',
'Bismarck, North Dakota, United States', 'Salem, Oregon, United States',
'Harrisburg, Pennsylvania, United States',
'Providence, Rhode Island, United States',
'Columbia, South Carolina, United States',
'Nashville, Tennessee, United States',
'Austin, Texas, United States', 'Salt Lake City, Utah, United States',
'Montpelier, Vermont, United States', 'Olympia, Washington, United States',
'Charleston, West Virginia, United States',
'Madison, Wisconsin, United States', 'Cheyenne, Wyoming, United States']
self.assertEqual(names, features[0].data.getfield("n"))
return
def test_read_with_crs(self):
path = os.path.join(TESTDATA, "geojson_input/us-capitols.json")
features = vector.read_geojson(path, crs=LonLatWGS84)
for f in features:
self.assertEqual(f.crs, LonLatWGS84)
return
class TestGeoJSONOutput(unittest.TestCase):
maxDiff = None
def verify(self, a, b, precision):
if type(a) != type(b):
raise AssertionError("{} != {}".format(type(a), type(b)))
if isinstance(a, list):
for (_a, _b) in zip(a, b):
self.verify(_a, _b, precision)
elif isinstance(a, float):
self.assertAlmostEqual(a, b, places=precision)
else:
self.assertEqual(a, b)
return
def verifyDict(self, d1, d2, precision):
for key, v in d1.items():
try:
self.assertTrue(key in d2)
except AssertionError as e:
raise AssertionError("key '{}' not in dict 2".format(key))
if isinstance(v, dict):
self.verifyDict(v, d2[key], precision)
else:
self.verify(v, d2[key], precision)
return
def verifyJSON(self, json1, json2, precision=7):
""" Verify that two JSON strings are equivalent """
obj1 = json.loads(json1)
obj2 = json.loads(json2)
self.verifyDict(obj1, obj2, precision)
return
def test_point_write_cartesian(self):
p = Point((100.0, 0.0), crs=Cartesian)
s = p.as_geojson(urn="urn:ogc:def:crs:EPSG::5806", force_wgs84=False)
ans = """{"properties": {},"bbox": [100.0, 0.0, 100.0, 0.0],
"geometry": {"coordinates": [100.0, 0.0], "type": "Point"},
"type": "Feature",
"crs": {"properties": {"name": "urn:ogc:def:crs:EPSG::5806"}, "type": "name"} }"""
self.verifyJSON(s, ans)
return
def test_point_write(self):
p = Point((100.0, 0.0), crs=LonLatWGS84)
s = p.as_geojson(urn="urn:ogc:def:crs:EPSG::5806")
ans = """{"properties": {}, "bbox": [100.0, 0.0, 100.0, 0.0], "geometry": {"coordinates": [100.0, 0.0], "type": "Point"}, "type": "Feature", "crs": {"properties": {"name": "urn:ogc:def:crs:EPSG::5806"}, "type": "name"}}"""
self.verifyJSON(s, ans)
return
def test_line_write(self):
p = Line([(100.0, 0.0), (101.0, 1.0)], crs=LonLatWGS84)
s = p.as_geojson(urn="urn:ogc:def:crs:EPSG::5806")
ans = """{"type": "Feature", "geometry": {"coordinates": [[100.0, 0.0], [101.0, 1.0]], "type": "LineString"}, "properties": {}, "bbox": [100.0, 0.0, 101.0, 1.0], "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:EPSG::5806"}}}"""
self.verifyJSON(s, ans)
return
def test_polygon_write(self):
p = Polygon([[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
[100.0, 1.0]], crs=LonLatWGS84)
s = p.as_geojson(urn="urn:ogc:def:crs:EPSG::5806")
ans = """{ "properties": {}, "bbox": [100.0, 0.0, 101.0, 1.0], "geometry": { "type": "Polygon", "coordinates": [ [ [ 100.0, 0.0 ], [ 101.0, 0.0 ], [ 101.0, 1.0 ], [ 100.0, 1.0 ], [ 100.0, 0.0 ] ] ] }, "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::5806" } }, "type": "Feature" }"""
self.verifyJSON(s, ans)
return
def test_multiline_write(self):
p = Multiline([[(100, 0), (101, 1)], [(102, 2), (103, 3)]], crs=LonLatWGS84)
s = p.as_geojson(urn="urn:ogc:def:crs:EPSG::5806")
ans = """{"type": "Feature", "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::5806" } }, "properties": {}, "bbox": [100.0, 0.0, 103.0, 3.0], "geometry" : { "type": "MultiLineString", "coordinates": [ [ [100.0, 0.0], [101.0, 1.0] ], [ [102.0, 2.0], [103.0, 3.0] ] ] } }"""
self.verifyJSON(s, ans)
def test_multipolygon_write(self):
p = Multipolygon([[[(102, 2), (103, 2), (103, 3), (102, 3)]],
[[(100, 0), (101, 0), (101, 1), (100, 1)],
[(100.2, 0.2), (100.8, 0.2), (100.8, 0.8), (100.2, 0.8)]]],
crs=LonLatWGS84)
s = p.as_geojson(urn="urn:ogc:def:crs:EPSG::5806")
ans = """{"type": "Feature", "properties": {},"bbox": [100.0, 0.0, 103.0, 3.0], "geometry" : { "type": "MultiPolygon",
"coordinates": [
[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
[[100.2, 0.2], [100.2, 0.8], [100.8, 0.8], [100.8, 0.2], [100.2, 0.2]]]
]
}, "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::5806" } } }"""
self.verifyJSON(s, ans)
def test_write_reproject(self):
# tests whether coordinates are correctly reprojected to WGS84 lon/lat
p = Line([(1e6, 1e6), (1.2e6, 1.4e6)], crs=WebMercator)
s = p.as_geojson()
ans = """{ "type": "Feature", "properties": {},
"bbox": [8.983152841195214, 8.946573850543412, 10.779783409434257, 12.476624651238847],
"crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },
"geometry": {
"coordinates": [[8.983152841195214, 8.946573850543412],
[10.779783409434257, 12.476624651238847]],
"type": "LineString" } }"""
self.verifyJSON(s, ans)
return
def test_write_string_data(self):
capitols = Multipoint([(-112.1, 33.57), (-121.5, 38.57),
(-84.42, 33.76), (-86.15, 39.78), (-112.0, 46.6),
(-82.99, 39.98), (-77.48, 37.53), (-95.69, 39.04),
(-71.02, 42.33), (-96.68, 40.81)],
data = {"n": ["Phoenix, Arizona",
"Sacramento, California",
"Atlanta, Georgia",
"Indianapolis, Indiana",
"Helena, Montana", "Columbus, Ohio",
"Richmond, Virginia", "Topeka, Kansas",
"Boston, Massachusetts",
"Lincoln, Nebraska"]},
crs=LonLatWGS84)
s = capitols.as_geojson(urn="urn:ogc:def:crs:EPSG::5806")
d = json.loads(s)
ans = """{"bbox": [-121.5, 33.57, -71.02, 46.6], "properties": { "n": [ "Phoenix, Arizona", "Sacramento, California", "Atlanta, Georgia", "Indianapolis, Indiana", "Helena, Montana", "Columbus, Ohio", "Richmond, Virginia", "Topeka, Kansas", "Boston, Massachusetts", "Lincoln, Nebraska" ] }, "type": "Feature", "geometry": {"type": "MultiPoint", "coordinates": [ [ -112.1, 33.57 ], [ -121.5, 38.57 ], [ -84.42, 33.76 ], [ -86.15, 39.78 ], [ -112.0, 46.6 ], [ -82.99, 39.98 ], [ -77.48, 37.53 ], [ -95.69, 39.04 ], [ -71.02, 42.33 ], [ -96.68, 40.81 ] ] }, "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::5806" } } } """
self.verifyJSON(s, ans)
return
def test_write_data_crs(self):
capitols = Multipoint([Point((-112.1, 33.57), crs=LonLatWGS84),
Point((-121.5, 38.57), crs=LonLatWGS84),
Point((-84.42, 33.76), crs=LonLatWGS84),
Point((-86.15, 39.78), crs=LonLatWGS84),
Point((-112.0, 46.6), crs=LonLatWGS84),
Point((-82.99, 39.98), crs=LonLatWGS84),
Point((-77.48, 37.53), crs=LonLatWGS84),
Point((-95.69, 39.04), crs=LonLatWGS84),
Point((-71.02, 42.33), crs=LonLatWGS84),
Point((-96.68, 40.81), crs=LonLatWGS84),
Point((-97.51, 35.47), crs=LonLatWGS84),
Point((-134.2, 58.37), crs=LonLatWGS84),
Point((-100.3, 44.38), crs=LonLatWGS84)])
s = capitols.as_geojson()
self.assertTrue("crs" in s)
self.assertTrue('"name": "urn:ogc:def:crs:OGC:1.3:CRS84"' in s)
return
if __name__ == "__main__":
unittest.main()
|
fortyninemaps/karta
|
tests/geojson_tests.py
|
Python
|
mit
| 12,954
|
[
"COLUMBUS"
] |
e050050830ed2d81230125b1def8197f8a04c3f17277f8558c27ab10ea1bd060
|
from __future__ import print_function
import numpy as np
from ctypes import c_int, c_double, c_bool, c_float, c_char_p, c_void_p
import ctypes
import os
LIB_PATH = os.path.dirname( os.path.realpath(__file__) )
LIB_PATH_CPP = os.path.normpath(LIB_PATH+'../../../'+'/cpp/Build/libs/Molecular')
def recompile(path):
print( "recompile path :", path )
dir_bak = os.getcwd()
os.chdir( path)
os.system("make" )
os.chdir( dir_bak )
print( os.getcwd() )
# =========== main
recompile(LIB_PATH_CPP)
lib = ctypes.CDLL( LIB_PATH_CPP+"/libCLCFGO_lib.so" )
array1ui = np.ctypeslib.ndpointer(dtype=np.uint32, ndim=1, flags='CONTIGUOUS')
array1i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array2i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=2, flags='CONTIGUOUS')
array1d = np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array2d = np.ctypeslib.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
array3d = np.ctypeslib.ndpointer(dtype=np.double, ndim=3, flags='CONTIGUOUS')
c_int_p = ctypes.POINTER(c_int)
c_double_p = ctypes.POINTER(c_double)
# ========= C functions
#void loadFromFile( char const* filename, bool bCheck ){
lib.loadFromFile.argtypes = [ c_char_p ]
lib.loadFromFile.restype = c_bool
def loadFromFile( fname ):
return lib.loadFromFile( fname )
#void init( int natom_, int nOrb_, int perOrb_, int natypes_ ){
lib.init.argtypes = [ c_int, c_int, c_int, c_int ]
lib.init.restype = None
def init( natom, nOrb, perOrb, natypes ):
lib.init( natom, nOrb, perOrb, natypes )
# void eval(){
lib.eval.argtypes = [ ]
lib.eval.restype = c_double
def eval( ):
return lib.eval( )
# double evalFunc( double r, double s ){
lib.evalFunc.argtypes = [ c_double, c_double ]
lib.evalFunc.restype = c_double
def evalFunc( r, s ):
return lib.evalFunc( r, s )
#void evalFuncDerivs( int n, double* r, double* s, double* Es, double* Fs ){
lib.evalFuncDerivs.argtypes = [ c_int, array1d, array1d, array1d, array1d ]
lib.evalFuncDerivs.restype = None
def evalFuncDerivs( r, s, Es=None, Fs=None ):
r = r + s*0
s = s + r*0
n = len(r)
if Es is None: Es=np.zeros(n)
if Fs is None: Fs=np.zeros(n)
lib.evalFuncDerivs( n, r, s, Es, Fs )
return Es,Fs
#double coulombOrbPair( int io, int jo ){ return solver.CoulombOrbPair( io, jo ); }
lib.coulombOrbPair.argtypes = [ c_int, c_int ]
lib.coulombOrbPair.restype = c_double
def coulombOrbPair( io, jo ):
return lib.coulombOrbPair( io, jo )
#double projectOrb( int io, bool bNormalize ){
lib.projectOrb.argtypes = [ c_int, c_bool ]
lib.projectOrb.restype = c_double
def projectOrb( io, bNormalize=False ):
return lib.projectOrb( io, bNormalize )
# double* getEnergyPointer(){
lib.getEnergyPointer.argtypes = []
lib.getEnergyPointer.restype = c_double_p
def getEnergyTerms( sh=(7,) ):
# Ek=0, Eee EeePaul EeeExch Eae EaePaul Eaa
ptr = lib.getEnergyPointer()
return np.ctypeslib.as_array( ptr, shape=sh )
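# Usage sketch (illustrative; assumes init()/loadFromFile() and eval() were called first).
# The seven returned entries follow the order documented in the comment above:
#   Ek, Eee, EeePaul, EeeExch, Eae, EaePaul, Eaa = getEnergyTerms()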
#int* getDimPointer (){
lib.getDimPointer.argtypes = []
lib.getDimPointer.restype = c_int_p
def getDimPointer( sh=(8,) ):
# natypes natom nOrb nBas perOrb perOrb2 nqOrb nQtot
ptr = lib.getDimPointer()
return np.ctypeslib.as_array( ptr, shape=sh )
# void printAtomsAndElectrons(){
lib.printAtomsAndElectrons.argtypes = [ ]
lib.printAtomsAndElectrons.restype = None
def printAtomsAndElectrons( ):
lib.printAtomsAndElectrons( )
# void printSetup(){
lib.printSetup .argtypes = [ ]
lib.printSetup .restype = None
def printSetup ( ):
lib.printSetup ( )
#int* getIBuff(const char* name){
lib.getIBuff.argtypes = [c_char_p]
lib.getIBuff.restype = c_int_p
def getIBuff(name,sh):
if not isinstance(sh, tuple): sh=(sh,)
ptr = lib.getIBuff(name)
return np.ctypeslib.as_array( ptr, shape=sh)
#double* getBuff(const char* name){
lib.getBuff.argtypes = [c_char_p]
lib.getBuff.restype = c_double_p
def getBuff(name,sh):
ptr = lib.getBuff(name)
if not isinstance(sh, tuple): sh=(sh,)
#sh_ = (natom,)
#if sh is not None:
# sh_ = sh_ + sh
#print "DEBUG type( ptr ) ", type( ptr ), sh
return np.ctypeslib.as_array( ptr, shape=sh)
#void atomsPotAtPoints( int n, double* ps, double* out, double s, double Q )
#void orbAtPoints ( int io, int n, double* ps, double* out )
#void rhoAtPoints ( int io, int n, double* ps, double* out )
#void hartreeAtPoints ( int io, int n, double* ps, double* out )
#void atomsPotAtPoints( int n, double* ps, double* out, double s, double Q )
lib.atomsPotAtPoints.argtypes = [ c_int, array2d, array1d, c_double, c_double ]
lib.atomsPotAtPoints.restype = c_double
def atomsPotAtPoints( ps, out=None, s=0.0, Q=1.0 ):
n = len(ps)
if out is None: out=np.zeros(n)
lib.atomsPotAtPoints( n, ps, out, s, Q )
return out
#void orbAtPoints ( int io, int n, double* ps, double* out )
lib.orbAtPoints.argtypes = [ c_int, c_int, array2d, array1d ]
lib.orbAtPoints.restype = c_double
def orbAtPoints( ps, io=0, out=None ):
n = len(ps)
if out is None: out=np.zeros(n)
lib.orbAtPoints( io, n, ps, out )
return out
#void rhoAtPoints ( int io, int n, double* ps, double* out )
lib.rhoAtPoints.argtypes = [ c_int, c_int, array2d, array1d ]
lib.rhoAtPoints.restype = c_double
def rhoAtPoints( ps, io=0, out=None ):
n = len(ps)
if out is None: out=np.zeros(n)
lib.rhoAtPoints( io, n, ps, out )
return out
#void hartreeAtPoints ( int io, int n, double* ps, double* out )
lib.hartreeAtPoints.argtypes = [ c_int, c_int, array2d, array1d ]
lib.hartreeAtPoints.restype = c_double
def hartreeAtPoints( ps, io=0, out=None ):
n = len(ps)
if out is None: out=np.zeros(n)
lib.hartreeAtPoints( io, n, ps, out )
return out
#double test_GaussIntegral_ST( int iMODE, int n, double sj, double* sis, double* rs, double* E, double* fr, double* fs ){
lib.test_GaussIntegral_ST.argtypes = [ c_int, c_int, c_double, array1d, array1d, array1d, array1d, array1d ]
lib.test_GaussIntegral_ST.restype = c_double
def test_GaussIntegral_ST( iMODE=0, sj=1.0, sis=None, rs=None, r0=0.0, si=1.0 ):
if rs is None:
n=len(sis)
rs =np.zeros(n); rs[:]=r0
if sis is None:
n=len(rs)
sis=np.zeros(n); sis[:]=si
E=np.zeros(n); fr=np.zeros(n); fs=np.zeros(n)
lib.test_GaussIntegral_ST( iMODE, n, sj, sis, rs, E, fr, fs )
return E, fr, fs
#test_Poisson( double Rmax, double gStep, double * line_rho=0, double* line_rho_=0, bool bPrint=0, bool bSave=0, useWf=true ){
lib.test_Poisson.argtypes = [ c_int, c_double, c_double, array1d, array1d, c_bool, c_bool, c_bool ]
lib.test_Poisson.restype = c_double
def test_Poisson( io=0, Rmax=5.0, dx=0.1, bPrint=False, bSave=False, useWf=True, line_rho=None, line_rho_=None ):
n = int(2*Rmax/dx)
if line_rho is None: line_rho =np.zeros(n)
if line_rho_ is None: line_rho_=np.zeros(n)
err2 = lib.test_Poisson( io, Rmax, dx, line_rho, line_rho_, bPrint, bSave, useWf )
return err2, line_rho, line_rho_
#double test_CrossKinetic( int io, int jo, int nint, double dx, double Rmax, double gStep, double * line_Ek=0, double* line_Ek_g=0, double * line_f1=0, double* line_f2=0, int bPrint=0, bool bSave=0 ){
lib.test_OrbInteraction.argtypes = [ c_int, c_int, c_int, c_int, c_double, c_double, c_double, array1d, array1d, array1d, array1d, c_int, c_bool ]
lib.test_OrbInteraction.restype = c_double
def test_OrbInteraction( iMODE=1, io=0, jo=0, nint=40, dx=0.2, Rmax=5.0, gStep=0.1, bPrint=0, bSave=False, line_Ek=None, line_Ek_g=None, line_f1=None, line_f2=None ):
'''
iMODE : 1) Overlap S12 2) Kinetic T12 3) Coulomb K12
'''
ng = int( ( 2*Rmax + nint*dx )/gStep )
#print(" test_OrbInteraction ng ", ng)
if line_Ek is None: line_Ek =np.zeros(nint)
if line_Ek_g is None: line_Ek_g =np.zeros(nint)
if line_f1 is None: line_f1 =np.zeros(ng)
if line_f2 is None: line_f2 =np.zeros(ng)
err2 = lib.test_OrbInteraction( iMODE, io, jo, nint, dx, Rmax, gStep, line_Ek, line_Ek_g, line_f1, line_f2, bPrint, bSave )
#print "line_Ek_g ", line_Ek_g
#print "line_Ek ", line_Ek
return err2, line_Ek, line_Ek_g, line_f1, line_f2
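# Usage sketch (illustrative; assumes a geometry was loaded with loadFromFile() beforehand):
# compare the analytical and grid-projected overlap of orbitals 0 and 1.
#   err2, Ek, Ek_g, f1, f2 = test_OrbInteraction( iMODE=1, io=0, jo=1 )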
#void testDerivs_Coulomb_model( int n, double x0, double dx ){
lib.testDerivsP_Coulomb_model.argtypes = [ c_int, c_double, c_double ]
lib.testDerivsP_Coulomb_model.restype = c_double
def testDerivsP_Coulomb_model( n=100, x0=0.0, dx=0.1 ):
return lib.testDerivsP_Coulomb_model( n, x0, dx )
#void testDerivs_Coulomb_model( int n, double x0, double dx ){
lib.testDerivsS_Coulomb_model.argtypes = [ c_int, c_double, c_double ]
lib.testDerivsS_Coulomb_model.restype = c_double
def testDerivsS_Coulomb_model( n=100, x0=0.0, dx=0.1 ):
return lib.testDerivsS_Coulomb_model( n, x0, dx )
#void testDerivsP_Total( int n, double x0, double dx ){
lib.testDerivsP_Total.argtypes = [ c_int, c_double, c_double ]
lib.testDerivsP_Total.restype = c_double
def testDerivsP_Total( n=100, x0=0.0, dx=0.1 ):
return lib.testDerivsP_Total( n, x0, dx )
#void testDerivsS_Total( int n, double x0, double dx ){
lib.testDerivsS_Total.argtypes = [ c_int, c_double, c_double ]
lib.testDerivsS_Total.restype = c_double
def testDerivsS_Total( n=100, x0=0.0, dx=0.1 ):
return lib.testDerivsS_Total( n, x0, dx )
#void testDerivsTotal( int n, double* xs, double* Es, double* Fs, int what ){
lib.testDerivsTotal.argtypes = [ c_int, array1d, array1d, array1d, c_int ]
lib.testDerivsTotal.restype = c_double
def testDerivsTotal( xs, Es=None, Fs=None, what=0 ):
n = len(xs)
if Es is None: Es = np.zeros(n)
if Fs is None: Fs = np.zeros(n)
lib.testDerivsTotal( n, xs, Es, Fs, what )
return Es,Fs
#void setSwitches(bool bNormalize, bool bEvalKinetic, bool bEvalCoulomb, bool bEvalExchange, bool bEvalPauli, int iPauliModel, bool bEvalAA, bool bEvalAE, bool bEvalAECoulomb, bool bEvalAEPauli ){
lib.setSwitches.argtypes = [ c_bool, c_bool, c_bool, c_bool, c_bool, c_int, c_bool, c_bool, c_bool, c_bool ]
lib.setSwitches.restype = None
def setSwitches( normalize=True, kinetic=True, coulomb=True, exchange=True, pauli=True, pauliModel=0, AA=True, AE=True, AECoulomb=True, AEPauli=True ):
lib.setSwitches( normalize, kinetic, coulomb, exchange, pauli, pauliModel, AA, AE, AECoulomb, AEPauli )
#void setSwitches_(int bNormalize, int bNormForce, int bEvalKinetic, int bEvalCoulomb, int bEvalExchange, int bEvalPauli, int bEvalAA, int bEvalAE, int bEvalAECoulomb, int bEvalAEPauli ){
lib.setSwitches_.argtypes = [ c_int, c_int, c_int, c_int, c_int, c_int, c_int, c_int, c_int, c_int ]
lib.setSwitches_.restype = None
def setSwitches_( normalize=0, normForce=0, kinetic=0, coulomb=0, exchange=0, pauli=0, AA=0, AE=0, AECoulomb=0, AEPauli=0 ):
lib.setSwitches_( normalize, normForce, kinetic, coulomb, exchange, pauli, AA, AE, AECoulomb, AEPauli )
#void setPauliMode( int iPauli ){
lib.setPauliMode.argtypes = [ c_int ]
lib.setPauliMode.restype = None
def setPauliMode( iPauli ):
lib.setPauliMode( iPauli )
# ========= Python Functions
def getBuffs( natom, norb, perOrb ):
global Ebuf
Ebuf = getEnergyTerms( )
global apos,aforce,aPars,atype
apos = getBuff( "apos", (natom,3) )
aforce = getBuff( "aforce", (natom,3) )
aPars = getBuff ( "aPars", (natom,4) )
atype = getIBuff( "atype", (natom) )
# orbitals
global opos, odip, oEs, oQs, oQs, onq, ospin
opos = getBuff ( "opos", (norb,3) )
odip = getBuff ( "odip", (norb,3) )
oEs = getBuff ( "oEs", (norb) )
oQs = getBuff ( "oQs", (norb) )
onq = getIBuff( "onq", (norb) )
ospin = getIBuff( "ospin", (norb) )
# --- Wave-function components for each orbital
global epos, esize, ecoef
epos = getBuff( "epos", (norb,perOrb,3) )
esize = getBuff( "esize", (norb,perOrb) )
ecoef = getBuff( "ecoef", (norb,perOrb) )
# --- Forces acting on wave-functions components
global efpos, efsize, efcoef
efpos = getBuff( "efpos", (norb,perOrb,3) )
efsize = getBuff( "efsize", (norb,perOrb) )
efcoef = getBuff( "efcoef", (norb,perOrb) )
# --- Forces acting on wave-functions components
global enfpos, enfsize, enfcoef
enfpos = getBuff( "enfpos", (norb,perOrb,3) )
enfsize = getBuff( "enfsize", (norb,perOrb) )
enfcoef = getBuff( "enfcoef", (norb,perOrb) )
# --- Auxiliary electron density expansion basis functions
global rhoP, rhoQ, rhoS
rhoP = getBuff( "rhoP", (norb,perOrb,3) )
rhoQ = getBuff( "rhoQ", (norb,perOrb) )
rhoS = getBuff( "rhoS", (norb,perOrb) )
# --- Forces acting on auxiliary density basis functions
global rhofP, rhofQ, rhofS
rhofP = getBuff( "rhofP", (norb,perOrb,3) )
rhofQ = getBuff( "rhofQ", (norb,perOrb) )
rhofS = getBuff( "rhofS", (norb,perOrb) )
def test_Gaussian_Overlap_Product_derivatives():
# ============== Gaussian Overlap Product derivatives
#esizes[0][0] = xs
print "esizes", esizes
C,s,p, dCr, dA,dB = ref.product3D_s_deriv( xs,eXpos[0][0], eXpos[0][1],eXpos[0][1] )
(dSsi,dXsi,dXxi,dCsi) = dA
plt.figure()
plt.plot( xs, C, label = "C" )
plt.plot( xs, dCsi, label = "dC/dsa" )
plt.plot( xs_, (C[1:]-C[:-1])/dx, label = "dC/dsa_num", lw=3,ls=":" )
plt.plot( xs, p, label = "p")
plt.plot( xs, dXsi, label = "dp/dsa")
plt.plot( xs_, (p[1:]-p[:-1])/dx, label = "dp/dsa_num", lw=3,ls=":" )
plt.plot( xs, s, label = "s")
plt.plot( xs, dSsi, label = "ds/dsa")
plt.plot( xs_, (s[1:]-s[:-1])/dx, label = "ds/dsa_num", lw=3,ls=":" )
plt.legend()
plt.grid()
plt.minorticks_on()
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='gray')
plt.title('Gaussian Overlap Derivative')
def test_Gaussian_Electrostatics_derivatives():
# ============== Gaussian Electrostatics derivatives
print "esizes", esizes
plt.figure()
E,fr,fs = ref.Coulomb( xs, 1.0 )
plt.plot( xs, E, label = "E(r)")
plt.plot( xs, fr*xs, label = "dE/dr")
plt.plot( xs_, (E[1:]-E[:-1])/dx, label = "dE/dr_num", lw=3,ls=":" )
E,fr,fs = ref.Coulomb( 1.0, xs )
plt.plot( xs, E, label = "E(s)")
plt.plot( xs, fs*xs, label = "dE/ds")
plt.plot( xs_, (E[1:]-E[:-1])/dx, label = "dE/ds_num", lw=3,ls=":" )
plt.legend()
plt.grid()
plt.minorticks_on()
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='gray')
plt.title('Gaussian Coulomb Derivative')
# =========================================
# ============== Derivs in Python ============
# =========================================
def plot_Derivs_Python():
print " ========== Derivs in Python "
#eXpos[0][0] = xa
(E, Fp,Fs) = ref.evalEFtot( ecoefs, esizes, eXpos, xa=xs ); F=Fp
#(E, Fp,Fs) = ref.evalEFtot( ecoefs, esizes, eXpos, sa=xs ); F=Fs
#(E, F) = ref.evalEF_S_off ( xs, ecoefs, esizes, eXpos )
plt.subplot(1,2,2)
#plt.plot( xs, r , label='r' )
#plt.plot( xs, r , label='r' )
#plt.plot( xs, Sab , label='Sab' )
#plt.plot( xs, Qab , label='Qab' )
#plt.plot( xs, dQab, label='dSab_ana' )
#plt.plot( xs_, (Sab[1:]-Sab[:-1])/dx,':', label='dSab_num' )
#plt.figure(figsize=(12,10))
plt.plot( xs, E, label='E' )
plt.plot( xs, F, label='F_ana' )
plt.plot( xs_,(E[1:]-E[:-1])/dx,':', label='F_num', lw=3 )
#plt.plot( xs, fxi, label='fxi' )
plt.title('Python')
plt.legend()
#plt.ylim(-30,40)
#plt.ylim(-5,30)
plt.ylim( ylims[0], ylims[1] )
plt.grid()
plt.minorticks_on()
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='gray')
# =========================================
# ============== Derivs in C++ ============
# =========================================
def plot_Derivs_Cpp():
init(natom,norb,perORb,1) # natom, nOrb, perOrb, natypes
ecoef = getBuff("ecoef",(norb,perORb) )
esize = getBuff("esize",(norb,perORb) )
epos = getBuff("epos" ,(norb,perORb,3))
aQ = getBuff("aQs", (natom,) )
aQsize = getBuff("aQsize",(natom,) )
aPcoef = getBuff("aPcoef",(natom,) )
aPsize = getBuff("aPsize",(natom,) )
apos = getBuff("apos" ,(natom,3) )
plt.subplot(1,2,1)
plt.title('C++')
#plt.plot(l_xs,l_r,label="r")
ecoef[:,:] = np.array(ecoefs)[:,:]
esize[:,:] = np.array(esizes)[:,:]
epos [:,:,0] = np.array(eXpos)[:,:]
epos [:,:,1] = np.array(eYpos)[:,:]
epos [:,:,2] = np.array(eZpos)[:,:]
aQ [:] = np.array(aQs )
aQsize[:] = np.array(aQsizes)
aPcoef[:] = np.array(aPcoefs)
aPsize[:] = np.array(aPsizes)
apos [:,:]= np.array(aposs)[:,:]
#aposs_ = np.array(aposs)[:,:]
#apos [:,1] = aposs_[:,1]
#apos [:,2] = aposs_[:,2]
n = len(xs)
#testDerivs_Coulomb_model( n=n, x0=0.0, dx=0.1 )
print "===>> RUN C++ test : testDerivs_Total "
#setSwitches( normalize=False, kinetic=False, coulomb=False, exchange=False, pauli=False, pauliModel=1, AA=False, AE=True, AECoulomb=False, AEPauli=True );
#setSwitches( normalize=False, kinetic=False, coulomb=False, exchange=False, pauli=False, pauliModel=1, AA=False, AE=True, AECoulomb=True, AEPauli=False );
setSwitches( normalize=False, kinetic=False );
Es,Fs = testDerivsTotal( xs, what=0 ) # position deriv
#Es,Fs = testDerivsTotal( xs, what=1 ) # size deriv
print "===<< DONE C++ test : testDerivs_Total "
plt.plot(xs ,Es,label="E" )
plt.plot(xs ,-Fs,label="Fana")
plt.plot(xs_,(Es[1:]-Es[:-1])/dx,label="Fnum",ls=':',lw=3)
if __name__ == "__main__":
loadFromFile( "../../cpp/sketches_SDL/Molecular/data/e2_1g_2o.fgo" )
#loadFromFile( "../../cpp/sketches_SDL/Molecular/data/H2O_1g_8o.fgo" )
printSetup()
printAtomsAndElectrons()
eval()
exit()
import matplotlib.pyplot as plt
import CLCFGO_coulomb_derivs as ref
natom = 1
aposs = [[0.0,0.0,0.0],]
aQs = [4.0,] # atomic nuclei charge (after screening core electrons)
aPcoefs = [500.0,] # atomic core pauli repulsion coefficient (strength)
aQsizes = [0.5,] # atomic nuclei/pseudopotential size (radius of core electrons )
aPsizes = [0.1,] # atomic nuclei/pseudopotential size (radius of core electrons )
norb = 1
perORb = 1
ecoefs = [[1.0,], ]
esizes = [[0.1,],]
#eXpos = [[0.,],]
eXpos = [[0.,],]
eYpos = [[0.,],]
eZpos = [[0.,],]
'''
norb = 2
perORb = 2
ecoefs = [[1.0,1.0],[1.0,1.0] ]
esizes = [[1.0,1.0],[1.0,1.0] ]
#eXpos = [[0.,+0.5],[-3.5,-1.5]]
eXpos = [[0.,+0.0],[ -0.5, 0.5]]
eYpos = [[0.,+0.0],[ 0.0, 0.0]]
eZpos = [[0.,+0.0],[ 0.0, 0.0]]
'''
'''
norb = 2
perORb = 2
#ecoefs = [[+0.93,+0.68],[+0.65,+1.3]]
#esizes = [[+1.30,+0.90],[+1.60,+0.7]]
#eXpos = [[+0.00,+0.50],[-3.50,-2.0]]
#eYpos = [[+0.00,+0.00],[+0.00,+0.0]]
#eZpos = [[+0.50,-0.30],[-0.40,+0.8]]
'''
x0 = -1.0
dx = 0.05
xs = np.arange( x0, 4.0, dx )
xs_ = (xs[1:]+xs[:-1])*0.5
#test_Gaussian_Overlap_Product_derivatives()
test_Gaussian_Electrostatics_derivatives()
#plt.show()
#exit()
# =====================
#ylims=[-5,5]
#ylims=[-5,20]
#ylims=[-30,80]
ylims=[-200,400]
plt.figure(figsize=(14,8))
plot_Derivs_Python()
plot_Derivs_Cpp()
plt.legend()
#plt.ylim(-30,40)
#plt.ylim(-5,30)
#plt.xlim(0,l_xs[-3])
#plt.ylim( ylims[0], ylims[1] )
plt.grid()
plt.minorticks_on()
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='gray')
#print "Fc++ %g Fpy %g " %(l_Fana[0],F[0])
plt.show()
|
ProkopHapala/SimpleSimulationEngine
|
python/pyMolecular/CLCFGO.py
|
Python
|
mit
| 19,696
|
[
"Gaussian"
] |
103ef737b5699ba6677f0929fec2ed32844771a6681695332892e2ccef43b27d
|
import tensorflow as tf
import utils.utils as utils
class Cifar10CNN:
def __init__(self, config):
self.config = config
def conv2d(self, data, weight):
return tf.nn.conv2d(data,
weight,
strides=[1, 1, 1, 1],
padding='SAME')
def max_pool(self, data):
return tf.nn.max_pool(data,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
def variable_on_cpu(self, name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)
return var
def variable_with_weight_decay(self, name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = self.variable_on_cpu(name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
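# Usage sketch (illustrative names): the L2 terms collected above accumulate in the
# 'losses' collection and are typically combined with the data loss when building
# the training objective, e.g.
#   total_loss = cross_entropy_loss + tf.add_n(tf.get_collection('losses'))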
def model(self, images, eval=False, image_placeholder=None):
num_classes = int(self.config.get('main', 'num_classes'))
image_size = int(self.config.get('main', 'subsection_image_size'))
num_channels = int(self.config.get('main', 'num_channels'))
with tf.variable_scope('conv1', reuse=eval) as scope:
kernel = self.variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
if image_placeholder is None:
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
else:
whitened_image = tf.image.per_image_whitening(tf.reshape(image_placeholder, [image_size, image_size, num_channels]))
whitened_image_reshaped = tf.reshape(whitened_image, [1, image_size, image_size, num_channels])
conv = tf.nn.conv2d(whitened_image_reshaped, kernel, [1, 1, 1, 1], padding='SAME')
biases = self.variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0,
alpha=0.001 / 9.0, beta=0.75, name='norm1')
with tf.variable_scope('conv2', reuse=eval) as scope:
kernel = self.variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = self.variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0,
alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3', reuse=eval) as scope:
# Move everything into depth so we can perform a single matrix multiply.
if image_placeholder is None:
reshape = tf.reshape(pool2,
[int(self.config.get('main', 'batch_size')), -1])
else:
reshape = tf.reshape(pool2, [1, -1])
dim = reshape.get_shape()[1].value
weights = self.variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = self.variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
# local4
with tf.variable_scope('local4', reuse=eval) as scope:
weights = self.variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = self.variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear', reuse=eval) as scope:
weights = self.variable_with_weight_decay('weights', [192, num_classes],
stddev=1/192.0, wd=0.0)
biases = self.variable_on_cpu('biases', [num_classes],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
return softmax_linear
|
chiochio/pwnml
|
dpwn/models/cifar10_cnn.py
|
Python
|
mit
| 6,216
|
[
"Gaussian"
] |
e465aaf7470c5dee77f5148e7aa8b2f3fa119d00ceab8ae50411bc51f86345a7
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
from nose.tools import assert_equal
def create_cert_course():
world.clear_courses()
org = 'edx'
number = '999'
name = 'Certificates'
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org=org, number=number, display_name=name)
world.scenario_dict['course_id'] = world.scenario_dict['COURSE'].id
world.UPSELL_LINK_CSS = u'.message-upsell a.action-upgrade[href*="{}"]'.format(
world.scenario_dict['course_id']
)
honor_mode = world.CourseModeFactory.create(
course_id=world.scenario_dict['course_id'],
mode_slug='honor',
mode_display_name='honor mode',
min_price=0,
)
verified_mode = world.CourseModeFactory.create(
course_id=world.scenario_dict['course_id'],
mode_slug='verified',
mode_display_name='verified cert course',
min_price=16,
suggested_prices='32,64,128',
currency='usd',
)
def register():
url = u'courses/{}/about'.format(world.scenario_dict['course_id'])
world.browser.visit(django_url(url))
world.css_click('section.intro a.register')
assert world.is_css_present('section.wrapper h3.title')
@step(u'I select the audit track$')
def select_the_audit_track(step):
create_cert_course()
register()
btn_css = 'input[name="honor_mode"]'
world.wait(1) # TODO remove this after troubleshooting JZ
world.css_find(btn_css)
world.css_click(btn_css)
def select_contribution(amount=32):
radio_css = 'input[value="{}"]'.format(amount)
world.css_click(radio_css)
assert world.css_find(radio_css).selected
def click_verified_track_button():
world.wait_for_ajax_complete()
btn_css = 'input[value="Pursue a Verified Certificate"]'
world.css_click(btn_css)
@step(u'I select the verified track for upgrade')
def select_verified_track_upgrade(step):
select_contribution(32)
world.wait_for_ajax_complete()
btn_css = 'input[value="Upgrade Your Enrollment"]'
world.css_click(btn_css)
# TODO: might want to change this depending on the changes for upgrade
assert world.is_css_present('section.progress')
@step(u'I select the verified track$')
def select_the_verified_track(step):
create_cert_course()
register()
select_contribution(32)
click_verified_track_button()
assert world.is_css_present('section.progress')
@step(u'I should see the course on my dashboard$')
def should_see_the_course_on_my_dashboard(step):
course_css = 'li.course-item'
assert world.is_css_present(course_css)
@step(u'I go to step "([^"]*)"$')
def goto_next_step(step, step_num):
btn_css = {
'1': '#face_next_button',
'2': '#face_next_link',
'3': '#photo_id_next_link',
'4': '#pay_button',
}
next_css = {
'1': 'div#wrapper-facephoto.carousel-active',
'2': 'div#wrapper-idphoto.carousel-active',
'3': 'div#wrapper-review.carousel-active',
'4': 'div#wrapper-review.carousel-active',
}
world.css_click(btn_css[step_num])
# Pressing the button will advance the carousel to the next item
# and give the wrapper div the "carousel-active" class
assert world.css_find(next_css[step_num])
@step(u'I capture my "([^"]*)" photo$')
def capture_my_photo(step, name):
# Hard coded red dot image
image_data = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=='
snapshot_script = "$('#{}_image')[0].src = '{}';".format(name, image_data)
# Mirror the javascript of the photo_verification.html page
world.browser.execute_script(snapshot_script)
world.browser.execute_script("$('#{}_capture_button').hide();".format(name))
world.browser.execute_script("$('#{}_reset_button').show();".format(name))
world.browser.execute_script("$('#{}_approve_button').show();".format(name))
assert world.css_find('#{}_approve_button'.format(name))
@step(u'I approve my "([^"]*)" photo$')
def approve_my_photo(step, name):
button_css = {
'face': 'div#wrapper-facephoto li.control-approve',
'photo_id': 'div#wrapper-idphoto li.control-approve',
}
wrapper_css = {
'face': 'div#wrapper-facephoto',
'photo_id': 'div#wrapper-idphoto',
}
# Make sure that the carousel is in the right place
assert world.css_has_class(wrapper_css[name], 'carousel-active')
assert world.css_find(button_css[name])
# HACK: for now don't bother clicking the approve button for
# id_photo, because it is sending you back to Step 1.
# Come back and figure it out later. JZ Aug 29 2013
if name == 'face':
world.css_click(button_css[name])
# Make sure you didn't advance the carousel
assert world.css_has_class(wrapper_css[name], 'carousel-active')
@step(u'I select a contribution amount$')
def select_contribution_amount(step):
select_contribution(32)
@step(u'I confirm that the details match$')
def confirm_details_match(step):
# First you need to scroll down on the page
# to make the element visible?
# Currently chrome is failing with ElementNotVisibleException
world.browser.execute_script("window.scrollTo(0,1024)")
cb_css = 'input#confirm_pics_good'
world.css_click(cb_css)
assert world.css_find(cb_css).checked
@step(u'I am at the payment page')
def at_the_payment_page(step):
world.wait_for_present('input[name=transactionSignature]')
@step(u'I submit valid payment information$')
def submit_payment(step):
# First make sure that the page is done if it still executing
# an ajax query.
world.wait_for_ajax_complete()
button_css = 'input[value=Submit]'
world.css_click(button_css)
@step(u'I have submitted face and ID photos$')
def submitted_face_and_id_photos(step):
step.given('I am logged in')
step.given('I select the verified track')
step.given('I go to step "1"')
step.given('I capture my "face" photo')
step.given('I approve my "face" photo')
step.given('I go to step "2"')
step.given('I capture my "photo_id" photo')
step.given('I approve my "photo_id" photo')
step.given('I go to step "3"')
@step(u'I have submitted photos to verify my identity')
def submitted_photos_to_verify_my_identity(step):
step.given('I have submitted face and ID photos')
step.given('I select a contribution amount')
step.given('I confirm that the details match')
step.given('I go to step "4"')
@step(u'I submit my photos and confirm')
def submit_photos_and_confirm(step):
step.given('I go to step "1"')
step.given('I capture my "face" photo')
step.given('I approve my "face" photo')
step.given('I go to step "2"')
step.given('I capture my "photo_id" photo')
step.given('I approve my "photo_id" photo')
step.given('I go to step "3"')
step.given('I select a contribution amount')
step.given('I confirm that the details match')
step.given('I go to step "4"')
@step(u'I see that my payment was successful')
def see_that_my_payment_was_successful(step):
title = world.css_find('div.wrapper-content-main h3.title')
assert_equal(title.text, u'Congratulations! You are now verified on edX.')
@step(u'I navigate to my dashboard')
def navigate_to_my_dashboard(step):
world.css_click('span.avatar')
assert world.css_find('section.my-courses')
@step(u'I see the course on my dashboard')
def see_the_course_on_my_dashboard(step):
course_link_css = u'section.my-courses a[href*="{}"]'.format(world.scenario_dict['course_id'])
assert world.is_css_present(course_link_css)
@step(u'I see the upsell link on my dashboard')
def see_upsell_link_on_my_dashboard(step):
course_link_css = world.UPSELL_LINK_CSS
assert world.is_css_present(course_link_css)
@step(u'I do not see the upsell link on my dashboard')
def see_no_upsell_link(step):
course_link_css = world.UPSELL_LINK_CSS
assert world.is_css_not_present(course_link_css)
@step(u'I select the upsell link on my dashboard')
def select_upsell_link_on_my_dashboard(step):
# expand the upsell section
world.css_click('.message-upsell')
course_link_css = world.UPSELL_LINK_CSS
# click the actual link
world.css_click(course_link_css)
@step(u'I see that I am on the verified track')
def see_that_i_am_on_the_verified_track(step):
id_verified_css = 'li.course-item article.course.verified'
assert world.is_css_present(id_verified_css)
@step(u'I leave the flow and return$')
def leave_the_flow_and_return(step):
world.visit(u'verify_student/verified/{}/'.format(world.scenario_dict['course_id']))
@step(u'I am at the verified page$')
def see_the_payment_page(step):
assert world.css_find('button#pay_button')
@step(u'I edit my name$')
def edit_my_name(step):
btn_css = 'a.retake-photos'
world.css_click(btn_css)
|
olexiim/edx-platform
|
lms/djangoapps/courseware/features/certificates.py
|
Python
|
agpl-3.0
| 9,052
|
[
"VisIt"
] |
56823adaf2af72af5c6224c0eaee2e5160115437bf5e38898d989ca8429a132a
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
from wtforms import FormField, FieldList, Field, Form
CFG_GROUPS_META = {
'classes': None,
'indication': None,
'description': None
}
"""
Default group metadata.
"""
CFG_FIELD_FLAGS = [
'hidden',
'disabled',
'touched',
]
"""
List of WTForm field flags to be saved in draft.
See more about WTForm field flags on:
http://wtforms.simplecodes.com/docs/1.0.4/fields.html#wtforms.fields.Field.flags
"""
def filter_flags(field):
"""
Return a list of flags (from CFG_FIELD_FLAGS) set on a field.
"""
return filter(lambda flag: getattr(field.flags, flag), CFG_FIELD_FLAGS)
"""
Form customization
you can customize the following for the form
_title: str, the title to be rendered on top of the form
_subtitle: str/html. explanatory text to be shown under the title.
_drafting: bool, show or hide the drafts at the right of the form
"""
class WebDepositForm(Form):
""" Generic WebDeposit Form class """
def __init__(self, *args, **kwargs):
super(WebDepositForm, self).__init__(*args, **kwargs)
if not hasattr(self, 'template'):
self.template = 'deposit/run.html'
if not hasattr(self, '_drafting'):
self._drafting = True
self.type = self.__class__.__name__
def reset_field_data(self, exclude=[]):
"""
Reset the fields.data value to that of field.object_data.
Useful after initializing a form with both formdata and draftdata where
the formdata is missing field values (usually because we are saving a
single field).
@param exclude: List of field names to exclude.
"""
for field in self._fields.values():
field.reset_field_data(exclude=exclude)
def get_groups(self):
"""
Get a list of the (group metadata, list of fields)-tuples
The last element of the list has no group metadata (i.e. None),
and contains the list of fields not assigned to any group.
"""
fields_included = set()
field_groups = []
if hasattr(self, 'groups'):
for group in self.groups:
group_obj = {
'name': group[0],
'meta': CFG_GROUPS_META.copy(),
}
fields = []
for field_name in group[1]:
if field_name in ['-', ]:
fields.append(field_name)
else:
try:
fields.append(self[field_name])
fields_included.add(field_name)
except KeyError:
pass
if len(group) == 3:
group_obj['meta'].update(group[2])
field_groups.append((group_obj, fields))
# Append missing fields not defined in groups
rest_fields = []
for field in self:
if field.name not in fields_included:
rest_fields.append(field)
if rest_fields:
field_groups.append((None, rest_fields))
return field_groups
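# Illustrative `groups` definition consumed by get_groups() (field names are
# hypothetical); each entry is (group name, list of field names[, metadata dict]):
#   groups = [
#       ('Basic information', ['title', 'authors'], {'indication': 'required'}),
#       ('Description', ['notes']),
#   ]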
def get_template(self):
"""
Get template to render this form.
Define a data member `template` to customize which template to use.
By default, it will render the template `deposit/run.html`
"""
return [self.template]
def post_process(self, form=None, formfields=[], submit=False):
"""
Run form post-processing by calling `post_process` on each field,
passing any extra `Form.post_process_<fieldname>` processors to the
field.
If ``formfields'' are specified, only the given fields' processors will
be run (which may touch all fields of the form).
The post processing allows the form to alter other fields in the form,
via e.g. contacting external services (e.g a DOI field could retrieve
title, authors from CrossRef/DataCite).
"""
if form is None:
form = self
for name, field, in self._fields.items():
inline = getattr(
self, 'post_process_%s' % name, None)
if inline is not None:
extra = [inline]
else:
extra = []
field.post_process(form, formfields=formfields,
extra_processors=extra, submit=submit)
def autocomplete(self, name, term, limit=50, _form=None):
"""
Auto complete a form field.
Example::
form = FormClass()
form.autocomplete('related_identifiers-1-scheme','do')
Implementation notes:
The form will first try a fast lookup by field name in the form, and
delegate the auto-completion to the field. This will work for all but
field enclosures (FieldList and FormField). If the first lookup fails,
each field enclosure is checked if they can auto-complete the term,
which usually involves parsing the field name and generating a
stub-field (see further details in wtforms_field module).
@param name: Name of field (e.g. title or related_identifiers-1-scheme)
@param term: Term to return auto-complete results for
@param limit: Maximum number of results to return
@return: None in case field could not be found, otherwise a (possibly
empty) list of results.
"""
if name in self._fields:
res = self._fields[name].perform_autocomplete(
_form or self,
name,
term,
limit=limit,
)
if res is not None:
return res[:limit]
else:
for f in self._fields.values():
# Only check field enclosures which cannot be found with above
# method.
if name.startswith(f.name):
res = f.perform_autocomplete(
_form or self,
name,
term,
limit=limit,
)
if res is not None:
return res[:limit]
return None
def get_flags(self, filter_func=filter_flags):
"""
Return dictionary of fields and their set flags
"""
flags = {}
for f in self._fields.values():
if hasattr(f, 'get_flags'):
flags.update(f.get_flags(filter_func=filter_func))
else:
flags.update({f.name: filter_func(f)})
return flags
def set_flags(self, flags):
"""
Set flags on fields
@param flags: Dictionary of fields and their set flags (same structure
as returned by get_flags).
"""
for f in self._fields.values():
f.set_flags(flags)
@property
def json_data(self):
"""
Return form data in a format suitable for the standard JSON encoder, by
calling Field.json_data() on each field if it exists, otherwise it uses
the value of Field.data.
"""
return dict(
(name, f.json_data if getattr(f, 'json_data', None) else f.data)
for name, f in self._fields.items()
)
@property
def messages(self):
"""
Return a dictionary of form messages.
"""
_messages = {}
for f in self._fields.values():
_messages.update(f.messages)
return dict([
(
fname,
msgs if msgs.get('state', '') or msgs.get('messages', '')
else {}
) for fname, msgs in _messages.items()
])
class FormVisitor(object):
"""
Generic form visitor to iterate over all fields in a form. See DataExporter
for an example of how to export all data.
"""
def visit(self, form_or_field):
if isinstance(form_or_field, FormField):
self.visit_formfield(form_or_field)
elif isinstance(form_or_field, FieldList):
self.visit_fieldlist(form_or_field)
elif isinstance(form_or_field, Form):
self.visit_form(form_or_field)
elif isinstance(form_or_field, Field):
self.visit_field(form_or_field)
def visit_form(self, form):
for field in form:
self.visit(field)
def visit_field(self, field):
pass
def visit_fieldlist(self, fieldlist):
for field in fieldlist.get_entries():
self.visit(field)
def visit_formfield(self, formfield):
self.visit(formfield.form)
class DataExporter(FormVisitor):
"""
Visitor to export form data into dictionary supporting filtering and key
renaming.
Usage::
form = ...
visitor = DataExporter(filter_func=lambda f: not f.flags.disabled)
visitor.visit(form)
Given e.g. the following form::
class MyForm(WebDepositForm):
title = TextField(export_key='my_title')
notes = TextAreaField()
authors = FieldList(FormField(AuthorForm))
the visitor will export a dictionary similar to::
{'my_title': ..., 'notes': ..., authors: [{...}, ...], }
"""
def __init__(self, filter_func=None):
self.data = {}
self.data_stack = [self.data]
if filter_func is not None:
self.filter_func = filter_func
else:
self.filter_func = lambda f: True
def _export_name(self, field):
""" Get dictionary key - defaults to field name """
return field.export_key if getattr(field, 'export_key', None) \
else field.short_name
#
# Stack helper methods
#
def _top_stack_element(self):
return self.data_stack[-1]
def _pop_stack(self):
self.data_stack.pop()
def _push_stack(self, field, prototype):
data = self._top_stack_element()
if isinstance(data, list):
data.append(prototype)
self.data_stack.append(data[-1])
else:
data[self._export_name(field)] = prototype
self.data_stack.append(data[self._export_name(field)])
#
# Visit methods
#
def visit_field(self, field):
if (self.filter_func)(field):
data = self._top_stack_element()
if isinstance(data, list):
data.append(field.data)
else:
data[self._export_name(field)] = field.data
def visit_formfield(self, formfield):
if (self.filter_func)(formfield):
self._push_stack(formfield, {})
super(DataExporter, self).visit_formfield(formfield)
self._pop_stack()
def visit_fieldlist(self, fieldlist):
if (self.filter_func)(fieldlist):
self._push_stack(fieldlist, [])
super(DataExporter, self).visit_fieldlist(fieldlist)
self._pop_stack()
|
MSusik/invenio
|
invenio/modules/deposit/form.py
|
Python
|
gpl-2.0
| 11,838
|
[
"VisIt"
] |
c09a6e51e2deb8bbf0164a2a319ea35b615b7e7944631f86ff3332fa7fdd4db6
|
import math
"""
Parameters for instruments and Abins
"""
# Instruments constants #############################
# These parameters can be changed by a user if necessary
fwhm = 3.0 # approximate value for the full width at half maximum for Gaussian experimental resolutions
# TwoDMap instrument
delta_width = 0.1 # width of narrow Gaussian which approximates Dirac delta
# TOSCA instrument
# TOSCA parameters for calculating Q^2
tosca_final_neutron_energy = 32.0 # Final energy on the crystal analyser in cm-1
tosca_cos_scattering_angle = math.cos(2.356) # Angle of the crystal analyser in radians
# TOSCA parameters for resolution function
# sigma = tosca_a * omega * omega + tosca_b * omega + tosca_c
# where sigma is width of Gaussian function
tosca_a = 0.0000001
tosca_b = 0.005
tosca_c = 2.5
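# Worked example of the resolution formula above: at omega = 1000 cm^-1,
# sigma = 0.0000001 * 1000**2 + 0.005 * 1000 + 2.5 = 0.1 + 5.0 + 2.5 = 7.6 cm^-1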
# Instruments constants end ##########################
# Abins internal parameters ##########################
# Parameters which can be changed by a user if necessary
# name of the group in the hdf file in which extracted data from DFT phonon calculations are stored
dft_group = "PhononAB"
powder_data_group = "Powder" # name of the group where PowderData is stored
crystal_data_group = "SingleCrystal" # name of the group where SingleCrystalData is stored
s_data_group = "S" # name of the group where dynamical factor is stored
pkt_per_peak = 50 # number of points for each peak broadened by the experimental resolution
bin_width = 1.0 # defines width of bins used in rebinning of S
max_wavenumber = 4100.0 # maximum wavenumber in cm^-1 taken into account while creating workspaces (exclusive)
min_wavenumber = 0.0 # minimal wavenumber in cm^-1 taken into account while creating workspaces (exclusive)
acoustic_phonon_threshold = 0.0 # frequencies below this value are treated as acoustic and neglected.
# threshold expressed as a fraction of max S intensity below which S values are treated as zero
s_relative_threshold = 0.01
# values of S below that value are considered to be zero (to be use in case threshold calculated from
# s_relative_threshold is larger than s_absolute_threshold)
s_absolute_threshold = 10e-8
optimal_size = 5000000 # this is used to create optimal size of chunk energies for which S is calculated
# Actual chunk of energies < optimal_size
threads = 3 # number of threads used in parallel calculations
# Abins internal parameters end ###########################
|
wdzhou/mantid
|
scripts/AbinsModules/AbinsParameters.py
|
Python
|
gpl-3.0
| 2,417
|
[
"CRYSTAL",
"DIRAC",
"Gaussian"
] |
e474c4cc085a7859ffa16f0a435408ca2be70cc8cb14fcb52e3334f9ea747007
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPhonopy(PythonPackage):
"""Phonopy is an open source package for phonon
calculations at harmonic and quasi-harmonic levels."""
homepage = "http://atztogo.github.io/phonopy/index.html"
url = "http://sourceforge.net/projects/phonopy/files/phonopy/phonopy-1.10/phonopy-1.10.0.tar.gz"
version('1.10.0', '973ed1bcea46e21b9bf747aab9061ff6')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
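# Installation sketch (exact commands depend on the local Spack setup):
#   $ spack install py-phonopy
#   $ spack load py-phonopy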
|
skosukhin/spack
|
var/spack/repos/builtin/packages/py-phonopy/package.py
|
Python
|
lgpl-2.1
| 1,833
|
[
"phonopy"
] |
28fee2f0a1237a6e25f6fe5d93fe6a7d86c78c765455f148003886e122ee5de7
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import mock
from stoqlib.gui.dialogs.purchasedetails import PurchaseDetailsDialog
from stoqlib.gui.editors.callseditor import CallsEditor
from stoqlib.gui.editors.workordereditor import WorkOrderEditor
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.widgets.webview import WebView
class TestWebView(GUITest):
@mock.patch('stoqlib.gui.widgets.webview.run_dialog')
@mock.patch('stoqlib.gui.widgets.webview.api.new_store')
def test_dialog_payment_details(self, new_store, run_dialog):
new_store.return_value = self.store
payment = self.create_payment()
web_view = WebView()
web_view.app = None
with mock.patch.object(self.store, 'commit'):
with mock.patch.object(self.store, 'close'):
web_view._dialog_payment_details(id=payment.id)
@mock.patch('stoqlib.gui.widgets.webview.run_dialog')
@mock.patch('stoqlib.gui.widgets.webview.api.new_store')
def test_dialog_purchase(self, new_store, run_dialog):
new_store.return_value = self.store
purchase = self.create_purchase_order()
web_view = WebView()
web_view.app = None
with mock.patch.object(self.store, 'commit'):
with mock.patch.object(self.store, 'close'):
web_view._dialog_purchase(id=purchase.id)
run_dialog.assert_called_once_with(
PurchaseDetailsDialog, None, self.store, purchase)
@mock.patch('stoqlib.gui.widgets.webview.run_dialog')
@mock.patch('stoqlib.gui.widgets.webview.api.new_store')
def test_dialog_call(self, new_store, run_dialog):
new_store.return_value = self.store
call = self.create_call()
web_view = WebView()
web_view.app = None
with mock.patch.object(self.store, 'commit'):
with mock.patch.object(self.store, 'close'):
web_view._dialog_call(id=call.id)
run_dialog.assert_called_once_with(
CallsEditor, None, self.store, call, None, None)
@mock.patch('stoqlib.gui.widgets.webview.run_dialog')
@mock.patch('stoqlib.gui.widgets.webview.api.new_store')
def test_dialog_work_order(self, new_store, run_dialog):
new_store.return_value = self.store
wo = self.create_workorder()
web_view = WebView()
web_view.app = None
with mock.patch.object(self.store, 'commit'):
with mock.patch.object(self.store, 'close'):
web_view._dialog_work_order(id=wo.id)
run_dialog.assert_called_once_with(
WorkOrderEditor, None, self.store, wo, visual_mode=False)
def test_show_in_payments_by_date(self):
web_view = WebView()
web_view.app = mock.Mock()
web_view._show_in_payments_by_date('2013-1-1')
web_view.app.window.run_application.assert_called_once_with(
u'receivable', refresh=False)
def test_show_out_payments_by_date(self):
web_view = WebView()
web_view.app = mock.Mock()
web_view._show_out_payments_by_date('2013-1-1')
web_view.app.window.run_application.assert_called_once_with(
u'payable', refresh=False)
def test_show_purchases_by_date(self):
web_view = WebView()
web_view.app = mock.Mock()
web_view._show_purchases_by_date('2013-1-1')
web_view.app.window.run_application.assert_called_once_with(
u'purchase', refresh=False)
def test_show_work_orders_by_date(self):
web_view = WebView()
web_view.app = mock.Mock()
web_view._show_work_orders_by_date('2013-1-1')
web_view.app.window.run_application.assert_called_once_with(
u'services', refresh=False)
|
tiagocardosos/stoq
|
stoqlib/gui/test/test_widgets_webview.py
|
Python
|
gpl-2.0
| 4,647
|
[
"VisIt"
] |
1fb6d4fbd640e2dcbd49545e530f5c4f28fbb82d7876526acb9754f7d3f9c316
|