| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64-64) |
|---|---|---|---|---|---|---|---|
import unittest
import vtk
from vtk.test import Testing
testInt = 12
testString = "test string"
testFloat = 5.4
class VTKPythonObjectCalldataInvokeEventTest(Testing.vtkTest):
@vtk.calldata_type(vtk.VTK_INT)
def callbackInt(self, caller, event, calldata):
self.calldata = calldata
@vtk.calldata_type(vtk.VTK_STRING)
def callbackString(self, caller, event, calldata):
self.calldata = calldata
@vtk.calldata_type(vtk.VTK_DOUBLE)
def callbackFloat(self, caller, event, calldata):
self.calldata = calldata
@vtk.calldata_type(vtk.VTK_OBJECT)
def callbackObj(self, caller, event, calldata):
self.calldata = calldata
def setUp(self):
self.vtkObj = vtk.vtkObject()
self.vtkObjForCallData = vtk.vtkObject()
def test_int(self):
self.vtkObj.AddObserver(vtk.vtkCommand.AnyEvent, self.callbackInt)
self.vtkObj.InvokeEvent(vtk.vtkCommand.ModifiedEvent, testInt)
self.assertEqual(self.calldata, testInt)
def test_string(self):
self.vtkObj.AddObserver(vtk.vtkCommand.AnyEvent, self.callbackString)
self.vtkObj.InvokeEvent(vtk.vtkCommand.ModifiedEvent, testString)
self.assertEqual(self.calldata, testString)
def test_float(self):
self.vtkObj.AddObserver(vtk.vtkCommand.AnyEvent, self.callbackFloat)
self.vtkObj.InvokeEvent(vtk.vtkCommand.ModifiedEvent, testFloat)
self.assertAlmostEqual(self.calldata, testFloat)
def test_obj(self):
self.vtkObj.AddObserver(vtk.vtkCommand.AnyEvent, self.callbackObj)
self.vtkObj.InvokeEvent(vtk.vtkCommand.ModifiedEvent, self.vtkObjForCallData)
self.assertAlmostEqual(self.calldata, self.vtkObjForCallData)
if __name__ == '__main__':
Testing.main([(VTKPythonObjectCalldataInvokeEventTest, 'test')])
| SimVascular/VTK | Common/Core/Testing/Python/TestInvokeEvent.py | Python | bsd-3-clause | 1,832 | ["VTK"] | d1b664bdce8a6c9477b47665d4b67a207365c7b68c739ed5550abffed05357ed |
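A minimal standalone sketch of the observer pattern exercised by the test above, assuming the same vtk Python bindings it imports: the calldata_type decorator also marks a plain function, and InvokeEvent forwards its extra argument as typed call data.

```python
import vtk

@vtk.calldata_type(vtk.VTK_INT)
def on_modified(caller, event, calldata):
    # calldata arrives as the integer passed to InvokeEvent below
    print("received call data:", calldata)

obj = vtk.vtkObject()
obj.AddObserver(vtk.vtkCommand.ModifiedEvent, on_modified)
obj.InvokeEvent(vtk.vtkCommand.ModifiedEvent, 42)  # prints "received call data: 42"
```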
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert Android xml resources to API 14 compatible.
There are two reasons that we cannot just use API 17 attributes,
so we are generating another set of resources by this script.
1. paddingStart attribute can cause a crash on Galaxy Tab 2.
2. There is a bug that paddingStart does not override paddingLeft on
JB-MR1. This is fixed on JB-MR2.
Therefore, this resource generation script can be removed when
we drop the support for JB-MR1.
Please refer to http://crbug.com/235118 for the details.
"""
import optparse
import os
import re
import shutil
import sys
import xml.dom.minidom as minidom
from util import build_utils
# Note that we are assuming 'android:' is an alias of
# the namespace 'http://schemas.android.com/apk/res/android'.
GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')
# Almost all the attributes that have "Start" or "End" in
# their names should be mapped.
ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
'drawableStart' : 'drawableLeft',
'layout_alignStart' : 'layout_alignLeft',
'layout_marginStart' : 'layout_marginLeft',
'layout_alignParentStart' : 'layout_alignParentLeft',
'layout_toStartOf' : 'layout_toLeftOf',
'paddingEnd' : 'paddingRight',
'drawableEnd' : 'drawableRight',
'layout_alignEnd' : 'layout_alignRight',
'layout_marginEnd' : 'layout_marginRight',
'layout_alignParentEnd' : 'layout_alignParentRight',
'layout_toEndOf' : 'layout_toRightOf'}
ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
ATTRIBUTES_TO_MAP_REVERSED = dict([v,k] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
def IterateXmlElements(node):
"""minidom helper function that iterates all the element nodes.
Iteration order is pre-order depth-first."""
if node.nodeType == node.ELEMENT_NODE:
yield node
for child_node in node.childNodes:
for child_node_element in IterateXmlElements(child_node):
yield child_node_element
def WarnIfDeprecatedAttribute(name, value, filename):
"""print a warning message if the given attribute is deprecated."""
if name in ATTRIBUTES_TO_MAP_REVERSED:
print >> sys.stderr, ('warning: ' + filename + ' should use ' +
ATTRIBUTES_TO_MAP_REVERSED[name] +
' instead of ' + name)
elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
print >> sys.stderr, ('warning: ' + filename +
' should use start/end instead of left/right for ' +
name)
def WriteDomToFile(dom, filename):
"""Write the given dom to filename."""
build_utils.MakeDirectory(os.path.dirname(filename))
with open(filename, 'w') as f:
dom.writexml(f, '', ' ', '\n', encoding='utf-8')
def HasStyleResource(dom):
"""Return True if the dom is a style resource, False otherwise."""
root_node = IterateXmlElements(dom).next()
return bool(root_node.nodeName == 'resources' and
list(root_node.getElementsByTagName('style')))
def ErrorIfStyleResourceExistsInDir(input_dir):
"""If a style resource is in input_dir, exist with an error message."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
dom = minidom.parse(input_filename)
if HasStyleResource(dom):
raise Exception('error: style file ' + input_filename +
' should be under ' + input_dir +
'-v17 directory. Please refer to '
'http://crbug.com/243952 for the details.')
def GenerateV14LayoutResourceDom(dom, filename_for_warning):
"""Convert layout resource to API 14 compatible layout resource.
Args:
dom: parsed minidom object to be modified.
filename_for_warning: file name to display in case we print warnings.
If None, do not print warning.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
# Iterate all the elements' attributes to find attributes to convert.
for element in IterateXmlElements(dom):
for name, value in list(element.attributes.items()):
# Convert any API 17 Start/End attributes to Left/Right attributes.
# For example, from paddingStart="10dp" to paddingLeft="10dp"
# Note: gravity attributes are not necessary to convert because
# start/end values are backward-compatible. Explained at
# https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom
if name in ATTRIBUTES_TO_MAP:
element.setAttribute(ATTRIBUTES_TO_MAP[name], value)
del element.attributes[name]
is_modified = True
elif filename_for_warning:
WarnIfDeprecatedAttribute(name, value, filename_for_warning)
return is_modified
def GenerateV14StyleResourceDom(dom, filename_for_warning):
"""Convert style resource to API 14 compatible style resource.
Args:
dom: parsed minidom object to be modified.
filename_for_warning: file name to display in case we print warnings.
If None, do not print warning.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
for style_element in dom.getElementsByTagName('style'):
for item_element in style_element.getElementsByTagName('item'):
name = item_element.attributes['name'].value
value = item_element.childNodes[0].nodeValue
if name in ATTRIBUTES_TO_MAP:
item_element.attributes['name'].value = ATTRIBUTES_TO_MAP[name]
is_modified = True
elif filename_for_warning:
WarnIfDeprecatedAttribute(name, value, filename_for_warning)
return is_modified
def GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename):
"""Convert API 17 layout resource to API 14 compatible layout resource.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
If the generated resource is identical to the original resource,
don't do anything. If not, write the generated resource to
output_v14_filename, and copy the original resource to output_v17_filename.
"""
dom = minidom.parse(input_filename)
is_modified = GenerateV14LayoutResourceDom(dom, input_filename)
if is_modified:
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
# Copy the original resource.
build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
shutil.copy2(input_filename, output_v17_filename)
def GenerateV14StyleResource(input_filename, output_v14_filename):
"""Convert API 17 style resources to API 14 compatible style resource.
Write the generated style resource to output_v14_filename.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
"""
dom = minidom.parse(input_filename)
GenerateV14StyleResourceDom(dom, input_filename)
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
"""Convert layout resources to API 14 compatible resources in input_dir."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
rel_filename = os.path.relpath(input_filename, input_dir)
output_v14_filename = os.path.join(output_v14_dir, rel_filename)
output_v17_filename = os.path.join(output_v17_dir, rel_filename)
GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename)
def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir):
"""Convert style resources to API 14 compatible resources in input_dir."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
rel_filename = os.path.relpath(input_filename, input_dir)
output_v14_filename = os.path.join(output_v14_dir, rel_filename)
GenerateV14StyleResource(input_filename, output_v14_filename)
def VerifyV14ResourcesInDir(input_dir, resource_type):
"""Verify that the resources in input_dir is compatible with v14, i.e., they
don't use attributes that cause crashes on certain devices. Print an error if
they have."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
exception_message = ('error : ' + input_filename + ' has an RTL attribute, '
'i.e., attribute that has "start" or "end" in its name.'
' Pre-v17 resources should not include it because it '
'can cause crashes on certain devices. Please refer to '
'http://crbug.com/243952 for the details.')
dom = minidom.parse(input_filename)
if resource_type in ('layout', 'xml'):
if GenerateV14LayoutResourceDom(dom, None):
raise Exception(exception_message)
elif resource_type == 'values':
if GenerateV14StyleResourceDom(dom, None):
raise Exception(exception_message)
def WarnIfDeprecatedAttributeInDir(input_dir, resource_type):
"""Print warning if resources in input_dir have deprecated attributes, e.g.,
paddingLeft, PaddingRight"""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
dom = minidom.parse(input_filename)
if resource_type in ('layout', 'xml'):
GenerateV14LayoutResourceDom(dom, input_filename)
elif resource_type == 'values':
GenerateV14StyleResourceDom(dom, input_filename)
def ParseArgs():
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
parser.add_option('--res-dir',
help='directory containing resources '
'used to generate v14 compatible resources')
parser.add_option('--res-v14-compatibility-dir',
help='output directory into which '
'v14 compatible resources will be generated')
parser.add_option('--stamp', help='File to touch on success')
parser.add_option('--verify-only', action="store_true", help='Do not generate'
' v14 resources. Instead, just verify that the resources are already '
"compatible with v14, i.e. they don't use attributes that cause crashes "
'on certain devices.')
options, args = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('res_dir', 'res_v14_compatibility_dir')
build_utils.CheckOptions(options, parser, required=required_options)
return options
def main(argv):
options = ParseArgs()
build_utils.DeleteDirectory(options.res_v14_compatibility_dir)
build_utils.MakeDirectory(options.res_v14_compatibility_dir)
for name in os.listdir(options.res_dir):
if not os.path.isdir(os.path.join(options.res_dir, name)):
continue
dir_pieces = name.split('-')
resource_type = dir_pieces[0]
qualifiers = dir_pieces[1:]
api_level_qualifier_index = -1
api_level_qualifier = ''
for index, qualifier in enumerate(qualifiers):
if re.match('v[0-9]+$', qualifier):
api_level_qualifier_index = index
api_level_qualifier = qualifier
break
# Android pre-v17 API doesn't support RTL. Skip.
if 'ldrtl' in qualifiers:
continue
input_dir = os.path.abspath(os.path.join(options.res_dir, name))
if options.verify_only:
if not api_level_qualifier or int(api_level_qualifier[1:]) < 17:
VerifyV14ResourcesInDir(input_dir, resource_type)
else:
WarnIfDeprecatedAttributeInDir(input_dir, resource_type)
else:
# We also need to copy the original v17 resource to *-v17 directory
# because the generated v14 resource will hide the original resource.
output_v14_dir = os.path.join(options.res_v14_compatibility_dir, name)
output_v17_dir = os.path.join(options.res_v14_compatibility_dir, name +
'-v17')
# We only convert layout resources under layout*/, xml*/,
# and style resources under values*/.
if resource_type in ('layout', 'xml'):
if not api_level_qualifier:
GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir,
output_v17_dir)
elif resource_type == 'values':
if api_level_qualifier == 'v17':
output_qualifiers = qualifiers[:]
del output_qualifiers[api_level_qualifier_index]
output_v14_dir = os.path.join(options.res_v14_compatibility_dir,
'-'.join([resource_type] +
output_qualifiers))
GenerateV14StyleResourcesInDir(input_dir, output_v14_dir)
elif not api_level_qualifier:
ErrorIfStyleResourceExistsInDir(input_dir)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gfreed/android_external_chromium-org | build/android/gyp/generate_v14_compatible_resources.py | Python | bsd-3-clause | 13,424 | ["Galaxy"] | 014f3ffe27dfd703328345ed6254875b078890815919864a0f63032726ec4a07 |
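A self-contained sketch of the Start/End to Left/Right rewrite the script above performs, using the same xml.dom.minidom API on an in-memory layout; the two-entry mapping below is a subset of its ATTRIBUTES_TO_MAP.

```python
import xml.dom.minidom as minidom

MAPPING = {'android:paddingStart': 'android:paddingLeft',
           'android:paddingEnd': 'android:paddingRight'}

layout = ('<TextView xmlns:android="http://schemas.android.com/apk/res/android" '
          'android:paddingStart="10dp"/>')
dom = minidom.parseString(layout)
for element in dom.getElementsByTagName('TextView'):
    for name, value in list(element.attributes.items()):
        if name in MAPPING:
            element.setAttribute(MAPPING[name], value)  # add the API 14 attribute
            element.removeAttribute(name)               # drop the API 17 one
print(dom.documentElement.toxml())  # now carries android:paddingLeft="10dp"
```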
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from util import merge_path, remove_trailing_junk, is_repeated_path, merge_href
from tokenizer import Tokenizer
class TestUtil(unittest.TestCase):
@unittest.skip('skip')
def test_merge_path(self):
self.assertEqual('/about/annualreport/index.php', merge_path('/a/b/', '../../../about/annualreport/index.php'))
self.assertEqual('/about/annualreport/index.php', merge_path('/', '../../../about/annualreport/index.php'))
self.assertEqual('/about/annualreport/index.php', merge_path('/a/', '../about/annualreport/index.php'))
self.assertEqual('/annualreport/index.php', merge_path('/', '../about/../../annualreport/index.php'))
self.assertEqual('/annualreport/index.php', merge_path('/', 'about/../annualreport/index.php'))
self.assertEqual('/a/about/annualreport/index.php', merge_path('/a/b/', '../about/annualreport/index.php'))
@unittest.skip('skip')
def test_remove_trailing_junk(self):
self.assertEqual(
'http://www.ics.uci.edu?p=2&c=igb-misc',
remove_trailing_junk('http://www.ics.uci.edu?p=2&c=igb-misc/degrees/index/'))
self.assertEqual(
'http://www.ics.uci.edu/computing/linux/shell.php',
remove_trailing_junk('http://www.ics.uci.edu/computing/linux/shell.php/computing/account/'))
self.assertEqual(
'http://www.ics.uci.edu/about/search/index.php',
remove_trailing_junk('http://www.ics.uci.edu/about/search/index.php/about_safety.php/grad/index.php/search_payroll.php/search_graduate.php/search_sao.php/search_dean.php/search_dept_in4matx.php/search_business.php/search_dept_stats.php/search_support.php/search_facilities.php/search_payroll.php/ugrad/index.php/search_graduate.php/about_deanmsg.php/search_dean.php/ICS/ics/about/bren/index.php/about_contact.php/search_dept_stats.php/index.php/bren/index.php/ICS/ICS/search_dept_stats.php/search_business.php/search_external.php/ugrad/search_dept_cs.php/search_sao.php/search_dean.php/../about_safety.php/../about_meet_the_dean.php/../../grad/index.php'))
self.assertEqual(
'http://www.ics.uci.edu/about/visit/../bren/bren_advance.php',
remove_trailing_junk('http://www.ics.uci.edu/about/visit/../bren/bren_advance.php'))
self.assertEqual(remove_trailing_junk('http://www.ics.uci.edu/~pfbaldi?baldiPage=296'), 'http://www.ics.uci.edu/~pfbaldi?baldiPage=296')
@unittest.skip('skip')
def test_is_repeated_path(self):
invalid_urls = [
'http://www.ics.uci.edu/alumni/hall_of_fame/stayconnected/hall_of_fame/hall_of_fame/stayconnected/hall_of_fame/hall_of_fame/inductees.aspx.php',
'http://www.ics.uci.edu/~mlearn/datasets/datasets/datasets/datasets/datasets/datasets/datasets/datasets/datasets/datasets/datasets/Abalone',
'http://www.ics.uci.edu/alumni/hall_of_fame/stayconnected/stayconnected/stayconnected/hall_of_fame/stayconnected/stayconnected/hall_of_fame/index.php'
]
for u in invalid_urls:
self.assertTrue(is_repeated_path(u))
valid_urls = [
'http://www.ics.uci.edu/~pazzani/Publications/OldPublications.html',
'http://www.ics.uci.edu/~theory/269/970103.html',
'http://www.ics.uci.edu/~mlearn/datasets/datasets/CMU+Face+Images',
'http://alderis.ics.uci.edu/files/AMBA_AHB_Functional_Verification_2Masters_Correct.out',
'http://www.ics.uci.edu/~sharad',
'http://www.ics.uci.edu/~sharad/students.html',
'http://mhcid.ics.uci.edu',
'http://www.ics.uci.edu/about/brenhall/index.php',
'http://www.ics.uci.edu/~goodrich/pubs',
'http://www.ics.uci.edu/alumni/stayconnected/hall_of_fame/inductees.aspx.php'
]
for u in valid_urls:
self.assertFalse(is_repeated_path(u))
@unittest.skip('skip')
def test_bs4(self):
from bs4 import BeautifulSoup
import os
from util import is_valid
from tokenizer import Tokenizer
import json
base_path = 'C:\\Users\\Jun-Wei\\Desktop\\webpages_raw'
book_file = 'bookkeeping.json'
upper, lower = '20', '289'
with open(os.path.join(base_path, book_file), 'r', encoding='utf8') as f:
book_data = json.load(f)
url = book_data[upper+'/'+lower]
print(url)
if not is_valid(url):
print('invalid')
else:
with open(os.path.join(base_path, upper, lower), 'r', encoding='utf8') as f:
soup = BeautifulSoup(f.read(), 'html5lib')
#if soup.title:
# print('Title', soup.find('title').text)
if soup.find_all('a'):
token_data = Tokenizer.tokenize_link(url, soup.find_all('a'))
'''
if soup.body:
print(soup.body)
print([s for s in soup.body.stripped_strings])
txt = ' '.join([s for s in soup.body.stripped_strings])
print(txt)
print('---')
for script in soup.body.find_all('script'):
fragment = ' '.join([s for s in script.stripped_strings])
txt = txt.replace(fragment, '')
for script in soup.body.find_all('style'):
fragment = ' '.join([s for s in script.stripped_strings])
txt = txt.replace(fragment, '')
print(txt)
print(Tokenizer.tokenize(txt))
'''
@unittest.skip('skip')
def test_stopwords(self):
print(Tokenizer.get_stopwords())
print(Tokenizer.get_synonyms())
t_list = ['software', 'computer', 'engineering', 'informatics']
token_list = ['software']
self.assertTrue(Tokenizer.contain(t_list, token_list))
token_list = ['software', 'engineering']
self.assertFalse(Tokenizer.contain(t_list, token_list))
token_list = ['software', 'computer']
self.assertTrue(Tokenizer.contain(t_list, token_list))
text = "cristina's software engineering class"
print(Tokenizer.tokenize(text))
text = "I love ai and machine learning, i know what's rest"
print(Tokenizer.tokenize(text))
if __name__ == '__main__':
unittest.main()
| jwlin/ir-proj3 | test_util.py | Python | mit | 6,453 | ["VisIt"] | 51d3c3060a591aa14eb15b093c4bfeadb7e1da93f71cd7209926cf2588c97311 |
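The merge_path and remove_trailing_junk helpers tested above live in the project's own util module; a rough standard-library analogue of the relative-path merging (an assumption for illustration, not the project's implementation) is urllib.parse.urljoin.

```python
from urllib.parse import urljoin

# '/a/b/' plus a '../..' reference resolves much like merge_path in the tests above.
print(urljoin('http://www.ics.uci.edu/a/b/', '../../about/annualreport/index.php'))
# -> http://www.ics.uci.edu/about/annualreport/index.php
```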
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument,redefined-outer-name
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Proctoring tab
if settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False) and course.enable_proctored_exams:
sections.append(_section_proctoring(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
context = {
'course': course,
'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_proctoring(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'proctoring',
'section_display_name': _('Proctoring'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=getattr(course_honor_mode[0], 'min_price'), currency=getattr(course_honor_mode[0], 'currency'),
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': settings.FEATURES.get('ENABLE_PROCTORED_EXAMS', False),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.")
insights_message = insights_message.format(
analytics_dashboard_name='{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
)
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'insights_message': insights_message,
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
| nikolas/edx-platform | lms/djangoapps/instructor/views/instructor_dashboard.py | Python | agpl-3.0 | 28,063 | ["VisIt"] | 101693460caadfd46bd90faf9cc1e8cb016868c99aecc4e2dfa5940728c56892 |
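A minimal sketch of the _section_* contract the dashboard module above describes: every section function returns a dict carrying at least 'section_key' and 'section_display_name'. The section name and function below are hypothetical, not part of the module.

```python
def _section_example(course, access):
    """ Provide data for a hypothetical dashboard section """
    return {
        'section_key': 'example_section',           # css hook / template filename
        'section_display_name': 'Example Section',  # link title in the nav bar
        'access': access,
        'course_id': unicode(course.id),            # module targets Python 2
    }
```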
#! /usr/bin/env python
"""
print DCommands working directory
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import ConfigCache
Script.setUsageMessage(
"\n".join(
[
__doc__.split("\n")[1],
"Usage:",
" %s [options]" % Script.scriptName,
]
)
)
configCache = ConfigCache()
Script.parseCommandLine(ignoreErrors=True)
configCache.cacheConfig()
session = DSession()
args = Script.getPositionalArgs()
ret = session.getCwd()
print(ret)
if __name__ == "__main__":
main()
| DIRACGrid/COMDIRAC | src/COMDIRAC/Interfaces/scripts/dpwd.py | Python | gpl-3.0 | 831 | ["DIRAC"] | 7f48ceff70a87f6c82b4fcdfd21456d2a81a96152970e4d18a95970759b4b063 |
from pype.ast import *
from pype.symtab import *
from pype.lib_import import LibraryImporter
from pype.fgir import FGNodeType, FGNode, Flowgraph, FGIR
from pype.error import *
class SymbolTableVisitor(ASTVisitor):
'''
Visitor subclass to walk through a SymbolTable
'''
def __init__(self):
self.symbol_table = SymbolTable()
def return_value(self):
return self.symbol_table
def visit(self, node):
# import statements make library functions available to PYPE
if isinstance(node, ASTImport):
imp = LibraryImporter(node.module)
imp.add_symbols(self.symbol_table)
# traverse the rest of the nodes in the tree
elif isinstance(node, ASTProgram):
if node.children is not None:
for child in node.children:
self.visit(child)
# add symbols for inputs, i.e. anything in an expression_list
# SymbolType: inputs
# ref: None
# scope: enclosing component
elif isinstance(node, ASTInputExpr):
if node.children is not None:
for child in node.children:
sym = Symbol(child.name, SymbolType.input, None)
scope = node.parent.name.name
self.symbol_table.addsym(sym, scope=scope)
# add symbols for assigned names, i.e. the bound name in an
# assignment expression
# SymbolType: var
# ref: None
# scope: enclosing component
elif isinstance(node, ASTAssignmentExpr):
sym = Symbol(node.binding.name, SymbolType.var, None)
scope = node.parent.name.name
self.symbol_table.addsym(sym, scope=scope)
# add symbols for components, i.e. the name of each components
# SymbolType: components
# ref: None
# scope: global
elif isinstance(node, ASTComponent):
sym = Symbol(node.name.name, SymbolType.component, None)
self.symbol_table.addsym(sym)
# traverse the rest of the nodes in the tree
if node.children is not None:
for child in node.children:
self.visit(child)
class LoweringVisitor(ASTModVisitor):
'Produces FGIR from an AST.'
def __init__(self, symtab):
self.symtab = symtab
self.ir = FGIR()
self.current_component = None
def visit(self, astnode):
if isinstance(astnode, ASTComponent):
name = astnode.name.name
self.ir[name] = Flowgraph(name=name)
self.current_component = name
return astnode
def post_visit(self, node, visit_value, child_values):
if isinstance(node, ASTProgram):
return self.ir
elif isinstance(node, ASTInputExpr):
fg = self.ir[self.current_component]
for child_v in child_values:
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # no use yet, declare it
var_nodeid = fg.new_node(FGNodeType.input).nodeid
else: # use before declaration
fg.nodes[var_nodeid].type = FGNodeType.input
fg.set_var(varname, var_nodeid)
fg.add_input(var_nodeid)
return None
elif isinstance(node, ASTOutputExpr):
fg = self.ir[self.current_component]
for child_v in child_values:
n = fg.new_node(FGNodeType.output)
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # use before declaration
# the "unknown" type will be replaced later
var_nodeid = fg.new_node(FGNodeType.unknown).nodeid
fg.set_var(varname, var_nodeid)
# already declared in an assignment or input expression
n.inputs.append(var_nodeid)
fg.add_output(n.nodeid)
return None
elif isinstance(node, ASTAssignmentExpr):
fg = self.ir[self.current_component]
# if a variable use precedes its declaration,
# a stub will be in this table
stub_nodeid = fg.get_var(node.binding.name)
if stub_nodeid is not None: # modify the existing stub
n = fg.nodes[stub_nodeid]
n.type = FGNodeType.assignment
else: # create a new node
n = fg.new_node(FGNodeType.assignment)
child_v = child_values[1]
if isinstance(child_v, FGNode): # subexpressions or literals
n.inputs.append(child_v.nodeid)
elif isinstance(child_v, ASTID): # variable lookup
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # use before declaration
# the "unknown" type will be replaced later
var_nodeid = fg.new_node(FGNodeType.unknown).nodeid
fg.set_var(varname, var_nodeid)
# already declared in an assignment or input expression
n.inputs.append(var_nodeid)
fg.set_var(node.binding.name, n.nodeid)
return None
elif isinstance(node, ASTEvalExpr):
fg = self.ir[self.current_component]
op = self.symtab.lookupsym(node.op.name,
scope=self.current_component)
if op is None:
raise PypeSyntaxError('Undefined operator: '+str(node.op.name))
if op.type == SymbolType.component:
n = fg.new_node(FGNodeType.component, ref=op.name)
elif op.type == SymbolType.libraryfunction:
n = fg.new_node(FGNodeType.libraryfunction, ref=op.ref)
elif op.type == SymbolType.librarymethod:
n = fg.new_node(FGNodeType.librarymethod, ref=op.ref)
else:
raise PypeSyntaxError('Invalid operator of type "' +
str(SymbolType)+'" in expression: ' +
str(node.op.name))
n.inputs = []
for child_v in child_values[1:]:
if isinstance(child_v, FGNode): # subexpressions or literals
n.inputs.append(child_v.nodeid)
elif isinstance(child_v, ASTID): # variable lookup
varname = child_v.name
var_nodeid = fg.get_var(varname)
if var_nodeid is None: # use before declaration
# the "unknown" type will be replaced later
var_nodeid = fg.new_node(FGNodeType.unknown).nodeid
fg.set_var(varname, var_nodeid)
# already declared in an assignment or input expression
n.inputs.append(var_nodeid)
return n
elif isinstance(node, ASTLiteral):
fg = self.ir[self.current_component]
n = fg.new_node(FGNodeType.literal, ref=node.value)
return n
else:
return visit_value
| Mynti207/cs207project | pype/translate.py | Python | mit | 7,247 | ["VisIt"] | cb4bf3c2cf2185f43d29344724f89a621bc6a7569207776f0a7dbcbe78cfb34d |
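A toy illustration of the pre-order, depth-first traversal that SymbolTableVisitor and LoweringVisitor above rely on; the Node class and names here are hypothetical, not part of pype.

```python
class Node(object):
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def visit_preorder(node, visit):
    visit(node)                   # visit the parent first ...
    for child in node.children:  # ... then each child, depth-first
        visit_preorder(child, visit)

tree = Node('program', [Node('component', [Node('inputs'), Node('assignment')])])
names = []
visit_preorder(tree, lambda n: names.append(n.name))
# names == ['program', 'component', 'inputs', 'assignment']
```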
"""SCons.Tool.zip
Tool-specific initialization for zip.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/zip.py 4043 2009/02/23 09:06:45 scons"
import os.path
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
try:
import zipfile
internal_zip = 1
except ImportError:
internal_zip = 0
if internal_zip:
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
def visit(arg, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
arg.write(path)
compression = env.get('ZIPCOMPRESSION', 0)
zf = zipfile.ZipFile(str(target[0]), 'w', compression)
for s in source:
if s.isdir():
os.path.walk(str(s), visit, zf)
else:
zf.write(str(s))
zf.close()
else:
zipcompression = 0
zip = "$ZIP $ZIPFLAGS ${TARGET.abspath} $SOURCES"
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$ZIPSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for zip to an Environment."""
try:
bld = env['BUILDERS']['Zip']
except KeyError:
bld = ZipBuilder
env['BUILDERS']['Zip'] = bld
env['ZIP'] = 'zip'
env['ZIPFLAGS'] = SCons.Util.CLVar('')
env['ZIPCOM'] = zipAction
env['ZIPCOMPRESSION'] = zipcompression
env['ZIPSUFFIX'] = '.zip'
def exists(env):
return internal_zip or env.Detect('zip')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| BenLand100/rat-pac | python/SCons/Tool/zip.py | Python | bsd-3-clause | 3,291 | ["VisIt"] | 76dc890be7931b5749b75359751f9df82253fc0f058e32c846ab31a434b39d14 |
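A standalone sketch of what the internal zip action above does, using the standard-library zipfile module; os.walk replaces the Python-2-only os.path.walk, and the paths are placeholders.

```python
import os
import zipfile

def zip_tree(archive_path, source_dir, compression=zipfile.ZIP_DEFLATED):
    """Write every regular file under source_dir into archive_path."""
    with zipfile.ZipFile(archive_path, 'w', compression) as zf:
        for dirname, _dirs, names in os.walk(source_dir):
            for name in names:
                path = os.path.join(dirname, name)
                if os.path.isfile(path):
                    zf.write(path)

# zip_tree('build.zip', 'build')  # example invocation
```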
# $Id$
#
# Copyright (C) 2007 by Greg Landrum
# All rights reserved
#
from rdkit import Chem,Geometry
from rdkit.Chem import AllChem
from rdkit.Chem.Subshape import SubshapeObjects
from rdkit.Chem.Subshape import BuilderUtils
import time,cPickle
#-----------------------------------------------------------------------------
class SubshapeCombineOperations(object):
UNION=0
SUM=1
INTERSECT=2
#-----------------------------------------------------------------------------
class SubshapeBuilder(object):
gridDims=(20,15,10)
gridSpacing=0.5
winRad=3.0
nbrCount=7
terminalPtRadScale=0.75
fraction=0.25
stepSize=1.0
featFactory=None
def SampleSubshape(self,subshape1,newSpacing):
ogrid=subshape1.grid
rgrid = Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
newSpacing)
for idx in range(rgrid.GetSize()):
l = rgrid.GetGridPointLoc(idx)
v = ogrid.GetValPoint(l)
rgrid.SetVal(idx,v)
res = SubshapeObjects.ShapeWithSkeleton()
res.grid = rgrid
return res;
def GenerateSubshapeShape(self,cmpd,confId=-1,addSkeleton=True,**kwargs):
shape = SubshapeObjects.ShapeWithSkeleton()
shape.grid=Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
self.gridSpacing)
AllChem.EncodeShape(cmpd,shape.grid,ignoreHs=False,confId=confId)
if addSkeleton:
conf = cmpd.GetConformer(confId)
self.GenerateSubshapeSkeleton(shape,conf,kwargs)
return shape
def __call__(self,cmpd,**kwargs):
return self.GenerateSubshapeShape(cmpd,**kwargs)
def GenerateSubshapeSkeleton(self,shape,conf=None,terminalPtsOnly=False,skelFromConf=True):
if conf and skelFromConf:
pts = BuilderUtils.FindTerminalPtsFromConformer(conf,self.winRad,self.nbrCount)
else:
pts = BuilderUtils.FindTerminalPtsFromShape(shape,self.winRad,self.fraction)
pts = BuilderUtils.ClusterTerminalPts(pts,self.winRad,self.terminalPtRadScale)
BuilderUtils.ExpandTerminalPts(shape,pts,self.winRad)
if len(pts)<3:
raise ValueError,'only found %d terminals, need at least 3'%len(pts)
if not terminalPtsOnly:
pts = BuilderUtils.AppendSkeletonPoints(shape.grid,pts,self.winRad,self.stepSize)
for i,pt in enumerate(pts):
BuilderUtils.CalculateDirectionsAtPoint(pt,shape.grid,self.winRad)
if conf and self.featFactory:
BuilderUtils.AssignMolFeatsToPoints(pts,conf.GetOwningMol(),self.featFactory,self.winRad)
shape.skelPts=pts
def CombineSubshapes(self,subshape1,subshape2,operation=SubshapeCombineOperations.UNION):
import copy
cs = copy.deepcopy(subshape1)
if operation==SubshapeCombineOperations.UNION:
cs.grid |= subshape2.grid
elif operation==SubshapeCombineOperations.SUM:
cs.grid += subshape2.grid
elif operation==SubshapeCombineOperations.INTERSECT:
cs.grid &= subshape2.grid
else:
raise ValueError,'bad combination operation'
return cs
if __name__=='__main__':
from rdkit.Chem import AllChem,ChemicalFeatures
from rdkit.Chem.PyMol import MolViewer
#cmpd = Chem.MolFromSmiles('CCCc1cc(C(=O)O)ccc1')
#cmpd = Chem.AddHs(cmpd)
if 1:
cmpd = Chem.MolFromSmiles('C1=CC=C1C#CC1=CC=C1')
cmpd = Chem.AddHs(cmpd)
AllChem.EmbedMolecule(cmpd)
AllChem.UFFOptimizeMolecule(cmpd)
AllChem.CanonicalizeMol(cmpd)
print >>file('testmol.mol','w+'),Chem.MolToMolBlock(cmpd)
else:
cmpd = Chem.MolFromMolFile('testmol.mol')
builder=SubshapeBuilder()
if 1:
shape=builder.GenerateSubshapeShape(cmpd)
v = MolViewer()
if 1:
import tempfile
tmpFile = tempfile.mktemp('.grd')
v.server.deleteAll()
Geometry.WriteGridToFile(shape.grid,tmpFile)
time.sleep(1)
v.ShowMol(cmpd,name='testMol',showOnly=True)
v.server.loadSurface(tmpFile,'testGrid','',2.5)
v.server.resetCGO('*')
cPickle.dump(shape,file('subshape.pkl','w+'))
for i,pt in enumerate(shape.skelPts):
v.server.sphere(tuple(pt.location),.5,(1,0,1),'Pt-%d'%i)
if not hasattr(pt,'shapeDirs'): continue
momBeg = pt.location-pt.shapeDirs[0]
momEnd = pt.location+pt.shapeDirs[0]
v.server.cylinder(tuple(momBeg),tuple(momEnd),.1,(1,0,1),'v-%d'%i)
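  # Illustrative sketch (not part of the original script): shapes produced this
  # way can be merged with CombineSubshapes; 'cmpd2' is a hypothetical second
  # molecule used only for illustration.
  #   shape2 = builder.GenerateSubshapeShape(cmpd2)
  #   merged = builder.CombineSubshapes(shape, shape2,
  #                                     SubshapeCombineOperations.INTERSECT)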
|
rdkit/rdkit-orig
|
rdkit/Chem/Subshape/SubshapeBuilder.py
|
Python
|
bsd-3-clause
| 4,287
|
[
"PyMOL",
"RDKit"
] |
80de31086289e4b6a1fc00588cc7705bf8159d377c6b8d0633d05d071023c947
|
# -*- coding: utf-8 -*-
import os
import sys
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run_flow
CLIENT_SECRETS_FILE = 'client_secrets.json'
YOUTUBE_SCOPES = ["https://www.googleapis.com/auth/youtube.readonly"]
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://code.google.com/apis/console#access
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
message=MISSING_CLIENT_SECRETS_MESSAGE,
scope=" ".join(YOUTUBE_SCOPES))
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage)
http = credentials.authorize(httplib2.Http())
yt_service = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=http).videos()
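# Illustrative follow-up (assumption, not part of the original script): with the
# authorized videos() resource above, metadata for a single video could be
# fetched like this; 'VIDEO_ID' is a placeholder.
#   response = yt_service.list(part='snippet,statistics', id='VIDEO_ID').execute()
#   for item in response.get('items', []):
#       print(item['snippet']['title'], item['statistics'].get('viewCount'))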
|
JohnPeel/Scooter
|
youtube.py
|
Python
|
gpl-3.0
| 1,378
|
[
"VisIt"
] |
10740a5a46ef70333deb71bf9047b30beb30b892a27265fe367f20b0b80c8a40
|
# $HeadURL: $
''' ResourceManagementDB
Module that provides basic methods to access the ResourceManagementDB.
'''
from datetime import datetime
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.ResourceStatusSystem.Utilities import MySQLWrapper
__RCSID__ = '$Id: $'
class ResourceManagementDB( object ):
'''
Class that defines the tables for the ResourceManagementDB on a python dictionary.
'''
# Written PrimaryKey as list on purpose !!
_tablesDB = {}
_tablesDB[ 'AccountingCache' ] = { 'Fields' :
{
#'AccountingCacheID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'PlotType' : 'VARCHAR(16) NOT NULL',
'PlotName' : 'VARCHAR(64) NOT NULL',
'Result' : 'TEXT NOT NULL',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Name', 'PlotType', 'PlotName' ]
}
_tablesDB[ 'DowntimeCache' ] = { 'Fields' :
{
'DowntimeID' : 'VARCHAR(64) NOT NULL',
'Element' : 'VARCHAR(32) NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'StartDate' : 'DATETIME NOT NULL',
'EndDate' : 'DATETIME NOT NULL',
'Severity' : 'VARCHAR(32) NOT NULL',
'Description' : 'VARCHAR(512) NOT NULL',
'Link' : 'VARCHAR(255) NOT NULL',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL',
'GOCDBServiceType' : 'VARCHAR(32) NOT NULL'
},
'PrimaryKey' : [ 'DowntimeID' ]
}
_tablesDB[ 'GGUSTicketsCache' ] = { 'Fields' :
{
'GocSite' : 'VARCHAR(64) NOT NULL',
'Link' : 'VARCHAR(1024) NOT NULL',
'OpenTickets' : 'INTEGER NOT NULL DEFAULT 0',
'Tickets' : 'VARCHAR(1024) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'GocSite' ]
}
_tablesDB[ 'JobCache' ] = { 'Fields' :
{
'Site' : 'VARCHAR(64) NOT NULL',
'MaskStatus' : 'VARCHAR(32) NOT NULL',
'Efficiency' : 'DOUBLE NOT NULL DEFAULT 0',
'Status' : 'VARCHAR(16) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Site' ]
}
_tablesDB[ 'PilotCache' ] = { 'Fields' :
{
'Site' : 'VARCHAR(64) NOT NULL',
'CE' : 'VARCHAR(64) NOT NULL',
'PilotsPerJob' : 'DOUBLE NOT NULL DEFAULT 0',
'PilotJobEff' : 'DOUBLE NOT NULL DEFAULT 0',
'Status' : 'VARCHAR(16) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Site', 'CE' ]
}
_tablesDB[ 'PolicyResult' ] = { 'Fields' :
{
'Element' : 'VARCHAR(32) NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'PolicyName' : 'VARCHAR(64) NOT NULL',
'StatusType' : 'VARCHAR(16) NOT NULL DEFAULT ""',
'Status' : 'VARCHAR(16) NOT NULL',
'Reason' : 'VARCHAR(512) NOT NULL DEFAULT "Unspecified"',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Element', 'Name', 'StatusType', 'PolicyName' ]
}
_tablesDB[ 'SpaceTokenOccupancyCache' ] = { 'Fields' :
{
'Endpoint' : 'VARCHAR( 64 ) NOT NULL',
'Token' : 'VARCHAR( 64 ) NOT NULL',
'Total' : 'DOUBLE NOT NULL DEFAULT 0',
'Guaranteed' : 'DOUBLE NOT NULL DEFAULT 0',
'Free' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Endpoint', 'Token' ]
}
_tablesDB[ 'TransferCache' ] = { 'Fields' :
{
'SourceName' : 'VARCHAR( 64 ) NOT NULL',
'DestinationName' : 'VARCHAR( 64 ) NOT NULL',
'Metric' : 'VARCHAR( 16 ) NOT NULL',
'Value' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'SourceName', 'DestinationName', 'Metric' ]
}
_tablesDB[ 'UserRegistryCache' ] = { 'Fields' :
{
'Login' : 'VARCHAR(16)',
'Name' : 'VARCHAR(64) NOT NULL',
'Email' : 'VARCHAR(64) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Login' ]
}
_tablesDB[ 'VOBOXCache' ] = { 'Fields' :
{
'Site' : 'VARCHAR( 64 ) NOT NULL',
'System' : 'VARCHAR( 64 ) NOT NULL',
'ServiceUp' : 'INTEGER NOT NULL DEFAULT 0',
'MachineUp' : 'INTEGER NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Site', 'System' ]
}
_tablesDB[ 'ErrorReportBuffer' ] = { 'Fields' :
{
'ID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'ElementType' : 'VARCHAR(32) NOT NULL',
'Reporter' : 'VARCHAR(64) NOT NULL',
'ErrorMessage' : 'VARCHAR(512) NOT NULL',
'Operation' : 'VARCHAR(64) NOT NULL',
'Arguments' : 'VARCHAR(512) NOT NULL DEFAULT ""',
'DateEffective' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'ID' ]
}
_tablesLike = {}
_tablesLike[ 'PolicyResultWithID' ] = { 'Fields' :
{
'ID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
'Element' : 'VARCHAR(32) NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'PolicyName' : 'VARCHAR(64) NOT NULL',
'StatusType' : 'VARCHAR(16) NOT NULL DEFAULT ""',
'Status' : 'VARCHAR(8) NOT NULL',
'Reason' : 'VARCHAR(512) NOT NULL DEFAULT "Unspecified"',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'ID' ]
}
_likeToTable = {
'PolicyResultLog' : 'PolicyResultWithID',
'PolicyResultHistory' : 'PolicyResultWithID',
}
def __init__( self, mySQL = None ):
'''
Constructor, accepts any DB or mySQL connection, mostly used for testing
purposes.
'''
self._tableDict = self.__generateTables()
if mySQL is not None:
self.database = mySQL
else:
self.database = DB( 'ResourceManagementDB',
'ResourceStatus/ResourceManagementDB' )
## SQL Methods ###############################################################
def insert( self, params, meta ):
'''
Inserts args in the DB making use of kwargs where parameters such as
the 'table' are specified ( filled automatically by the Client). Typically you
will not pass kwargs to this function, unless you know what are you doing
and you have a very special use case.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
utcnow = datetime.utcnow().replace( microsecond = 0 )
# We force lastCheckTime to utcnow if it is not present on the params
#if not( 'lastCheckTime' in params and not( params[ 'lastCheckTime' ] is None ) ):
if 'lastCheckTime' in params and params[ 'lastCheckTime' ] is None:
params[ 'lastCheckTime' ] = utcnow
if 'dateEffective' in params and params[ 'dateEffective' ] is None:
params[ 'dateEffective' ] = utcnow
return MySQLWrapper.insert( self, params, meta )
def update( self, params, meta ):
'''
Updates row with values given on args. The row selection is done using the
default of MySQLMonkey ( column.primary or column.keyColumn ). It can be
modified using kwargs. The 'table' keyword argument is mandatory, and
filled automatically by the Client. Typically you will not pass kwargs to
this function, unless you know what are you doing and you have a very
special use case.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
# We force lastCheckTime to utcnow if it is not present on the params
#if not( 'lastCheckTime' in params and not( params[ 'lastCheckTime' ] is None ) ):
if 'lastCheckTime' in params and params[ 'lastCheckTime' ] is None:
params[ 'lastCheckTime' ] = datetime.utcnow().replace( microsecond = 0 )
return MySQLWrapper.update( self, params, meta )
def select( self, params, meta ):
'''
Uses arguments to build conditional SQL statement ( WHERE ... ). If the
sql statement desired is more complex, you can use kwargs to interact with
the MySQL buildCondition parser and generate a more sophisticated query.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
return MySQLWrapper.select( self, params, meta )
def delete( self, params, meta ):
'''
Uses arguments to build conditional SQL statement ( WHERE ... ). If the
sql statement desired is more complex, you can use kwargs to interact with
the MySQL buildCondition parser and generate a more sophisticated query.
There is only one forbidden query, with all parameters None ( this would
mean a query of the type `DELETE * from TableName` ). The usage of kwargs
is the same as in the get function.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
return MySQLWrapper.delete( self, params, meta )
## Extended SQL methods ######################################################
def addOrModify( self, params, meta ):
'''
Using the PrimaryKeys of the table, it looks for the record in the database.
If it is there, it is updated, if not, it is inserted as a new entry.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
selectQuery = self.select( params, meta )
if not selectQuery[ 'OK' ]:
return selectQuery
isUpdate = False
if selectQuery[ 'Value' ]:
# Pseudo - code
# for all column not being PrimaryKey and not a time column:
# if one or more column different than params if not None:
# we update dateTime as well
columns = selectQuery[ 'Columns' ]
values = selectQuery[ 'Value' ]
if len( values ) != 1:
return S_ERROR( 'More than one value returned on addOrModify, please report !!' )
selectDict = dict( zip( columns, values[ 0 ] ) )
newDateEffective = None
for key, value in params.items():
if key in ( 'lastCheckTime', 'dateEffective' ):
continue
if value is None:
continue
if value != selectDict[ key[0].upper() + key[1:] ]:
newDateEffective = datetime.utcnow().replace( microsecond = 0 )
break
if 'dateEffective' in params:
params[ 'dateEffective' ] = newDateEffective
userQuery = self.update( params, meta )
isUpdate = True
else:
userQuery = self.insert( params, meta )
# This part only applies to PolicyResult table
logResult = self._logRecord( params, meta, isUpdate )
if not logResult[ 'OK' ]:
return logResult
return userQuery
# FIXME: this method looks unused. Maybe can be removed from the code.
def addIfNotThere( self, params, meta ):
'''
Using the PrimaryKeys of the table, it looks for the record in the database.
If it is not there, it is inserted as a new entry.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
selectQuery = self.select( params, meta )
if not selectQuery[ 'OK' ]:
return selectQuery
if selectQuery[ 'Value' ]:
return selectQuery
return self.insert( params, meta )
## Auxiliar methods ##########################################################
def getTable( self, tableName ):
'''
Returns a table dictionary description given its name
'''
if tableName in self._tableDict:
return S_OK( self._tableDict[ tableName ] )
return S_ERROR( '%s is not on the schema' % tableName )
def getTablesList( self ):
'''
Returns a list of the table names in the schema.
'''
return S_OK( self._tableDict.keys() )
## Protected methods #########################################################
def _checkTable( self ):
'''
Method used by database tools to write the schema
'''
return self.__createTables()
def _logRecord( self, params, meta, isUpdate ):
'''
Method that records every change on a LogTable.
'''
if not ( 'table' in meta and meta[ 'table' ] == 'PolicyResult' ):
return S_OK()
if isUpdate:
# This looks little bit like a non-sense. If we were updating, we may have
# not passed a complete set of parameters, so we have to get all them from the
# database :/. It costs us one more query.
updateRes = self.select( params, meta )
if not updateRes[ 'OK' ]:
return updateRes
params = dict( zip( updateRes[ 'Columns' ], updateRes[ 'Value' ][ 0 ] ))
# Writes to PolicyResult"Log"
meta[ 'table' ] += 'Log'
logRes = self.insert( params, meta )
return logRes
## Private methods ###########################################################
def __createTables( self, tableName = None ):
'''
Writes the schema in the database. If no tableName is given, all tables
are written in the database. If a table is already in the schema, it is
skipped to avoid problems trying to create a table that already exists.
'''
# Horrible SQL here !!
tablesCreatedRes = self.database._query( "show tables" )
if not tablesCreatedRes[ 'OK' ]:
return tablesCreatedRes
tablesCreated = [ tableCreated[0] for tableCreated in tablesCreatedRes[ 'Value' ] ]
tables = {}
if tableName is None:
tables.update( self._tableDict )
elif tableName in self._tableDict:
tables = { tableName : self._tableDict[ tableName ] }
else:
return S_ERROR( '"%s" is not a known table' % tableName )
for tableName in tablesCreated:
if tableName in tables:
del tables[ tableName ]
res = self.database._createTables( tables )
if not res[ 'OK' ]:
return res
# Human readable S_OK message
if res[ 'Value' ] == 0:
res[ 'Value' ] = 'No tables created'
else:
res[ 'Value' ] = 'Tables created: %s' % ( ','.join( tables.keys() ) )
return res
def __generateTables( self ):
'''
Method used to transform the class variables into instance variables,
for safety reasons.
'''
# Avoids copying object.
tables = {}
tables.update( self._tablesDB )
for tableName, tableLike in self._likeToTable.items():
tables[ tableName ] = self._tablesLike[ tableLike ]
return tables
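  # Illustrative usage sketch (assumption, not part of this module): a typical
  # client call following the params/meta convention documented in the SQL
  # methods above.
  #   db = ResourceManagementDB()
  #   res = db.addOrModify( { 'Site' : 'LCG.CERN.ch', 'MaskStatus' : 'Active',
  #                           'Efficiency' : 0.95, 'Status' : 'Good',
  #                           'LastCheckTime' : None },
  #                         { 'table' : 'JobCache' } )
  #   if not res[ 'OK' ]:
  #     print res[ 'Message' ]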
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
vmendez/DIRAC
|
ResourceStatusSystem/DB/ResourceManagementDB.py
|
Python
|
gpl-3.0
| 19,265
|
[
"DIRAC"
] |
87b4e30fb6fd8f04ce3c2d2295fbf4a6bc0d54ec365a06055ba21bbec547a807
|
"""
The B{0install digest} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, tempfile
from zeroinstall import SafeException, _
from zeroinstall.zerostore import manifest, unpack
from zeroinstall.cmd import UsageError
from zeroinstall import support
syntax = "DIRECTORY | ARCHIVE [EXTRACT]"
def add_options(parser):
parser.add_option("", "--algorithm", help=_("the hash function to use"), metavar="HASH")
def handle(config, options, args):
if len(args) == 1:
extract = None
elif len(args) == 2:
extract = args[1]
else:
raise UsageError()
source = args[0]
alg = manifest.algorithms.get(options.algorithm or 'sha1new', None)
if alg is None:
raise SafeException(_('Unknown algorithm "%s"') % alg)
def do_manifest(d):
if extract is not None:
d = os.path.join(d, extract)
digest = alg.new_digest()
for line in alg.generate_manifest(d):
digest.update((line + '\n').encode('utf-8'))
print(alg.getID(digest))
if os.path.isdir(source):
if extract is not None:
raise SafeException("Can't use extract with a directory")
do_manifest(source)
else:
data = None
tmpdir = tempfile.mkdtemp()
try:
data = open(args[0], 'rb')
unpack.unpack_archive(source, data, tmpdir, extract)
do_manifest(tmpdir)
finally:
support.ro_rmtree(tmpdir)
if data:
data.close()
|
timdiels/0install
|
zeroinstall/cmd/digest.py
|
Python
|
lgpl-2.1
| 1,441
|
[
"VisIt"
] |
99c7b05c7cabb3dd676081444ec4fbfc92c0d54ac9b011fc0e08d9e8998e17a8
|
import os
import glob
import sys
import warnings
import logging
import re
import subprocess
logger = logging.getLogger(__name__)
class NoSuchSensorError(Exception):
def __init__(self, port, type_id=None, name=None):
self.port = port
self.type_id = type_id
        self.name = name
def __str__(self):
return "No such sensor port=%d type_id=%d name=%s" % (self.port, self.type_id, self.name)
class NoSuchMotorError(Exception):
def __init__(self, port, _type):
self.port = port
self._type = _type
def __str__(self):
return "No such motor port=%s type=%s" % (self.port, self._type)
class NoSuchLibraryError(Exception):
def __init__(self, lib=""):
self.lib = lib
def __str__(self):
return "No such library %s" % self.lib
class Ev3StringType(object):
@staticmethod
def post_read(value):
return value
@staticmethod
def pre_write(value):
return value
class Ev3IntType(object):
@staticmethod
def post_read(value):
return int(value)
@staticmethod
def pre_write(value):
return str(value)
class Ev3BoolType(object):
@staticmethod
def post_read(value):
return bool(value)
@staticmethod
def pre_write(value):
return '1' if value else '0'
class Ev3OnOffType(object):
@staticmethod
def post_read(value):
return True if value == 'on' else False
@staticmethod
def pre_write(value):
if (value == 'on' or value == 'off'):
return value
else:
return 'on' if bool(value) else 'off'
class create_ev3_property(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, args in self.kwargs.items():
def ev3_property(name, read_only=False, property_type=Ev3StringType):
def fget(self):
return property_type.post_read(self.read_value(name))
def fset(self, value):
self.write_value(
name, property_type.pre_write(value))
return property(fget, None if read_only else fset)
setattr(cls, name, ev3_property(name, **args))
return cls
def get_battery_percentage():
"""
Return an int() of the percentage of battery life remaining
"""
voltage_max = None
voltage_min = None
voltage_now = None
with open('/sys/devices/platform/legoev3-battery/power_supply/legoev3-battery/uevent', 'r') as fh:
for line in fh:
if not voltage_max:
re_voltage_max = re.search('POWER_SUPPLY_VOLTAGE_MAX_DESIGN=(\d+)', line)
if re_voltage_max:
voltage_max = int(re_voltage_max.group(1))
continue
if not voltage_min:
re_voltage_min = re.search('POWER_SUPPLY_VOLTAGE_MIN_DESIGN=(\d+)', line)
if re_voltage_min:
voltage_min = int(re_voltage_min.group(1))
continue
if not voltage_now:
re_voltage_now = re.search('POWER_SUPPLY_VOLTAGE_NOW=(\d+)', line)
if re_voltage_now:
voltage_now = int(re_voltage_now.group(1))
if re_voltage_max and re_voltage_min and re_voltage_now:
break
if voltage_max and voltage_min and voltage_now:
# This happens with the EV3 rechargeable battery if it is fully charge
if voltage_now >= voltage_max:
return 100
# Haven't seen this scenario but it can't hurt to check for it
elif voltage_now <= voltage_min:
return 0
# voltage_now is between the min and max
else:
voltage_max -= voltage_min
voltage_now -= voltage_min
return int(voltage_now/float(voltage_max) * 100)
else:
logger.error('voltage_max %s, voltage_min %s, voltage_now %s' %\
(voltage_max, voltage_min, voltage_now))
return 0
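# Illustrative usage (not part of the original module):
#   print('Battery: %d%%' % get_battery_percentage())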
class Ev3Dev(object):
def __init__(self):
self.sys_path = ""
def read_value(self, name):
attr_file = os.path.join(self.sys_path, name)
if os.path.isfile(attr_file):
with open(attr_file) as f:
value = f.read().strip()
return value
else:
return None
def write_value(self, name, value):
attr_file = os.path.join(self.sys_path, name)
if os.path.isfile(attr_file):
with open(attr_file, 'w') as f:
f.write(str(value))
else:
return
@create_ev3_property(
bin_data={'read_only': True},
bin_data_format={'read_only': True},
dp={'read_only': True},
#mode={ 'read_only': False},
modes={'read_only': True},
name={'read_only': True},
port_name={'read_only': True},
type_id={'read_only': True, 'property_type': Ev3IntType},
uevent={'read_only': True},
units={'read_only': True},
value0={'read_only': True, 'property_type': Ev3IntType},
value1={'read_only': True, 'property_type': Ev3IntType},
value2={'read_only': True, 'property_type': Ev3IntType},
value3={'read_only': True, 'property_type': Ev3IntType},
value4={'read_only': True, 'property_type': Ev3IntType},
value5={'read_only': True, 'property_type': Ev3IntType},
value6={'read_only': True, 'property_type': Ev3IntType},
value7={'read_only': True, 'property_type': Ev3IntType}
)
class Msensor(Ev3Dev):
def __init__(self, port=-1, type_id=-1, name=None):
Ev3Dev.__init__(self)
type_id = int(type_id)
sensor_existing = False
if (port > 0):
self.port = port
for p in glob.glob('/sys/class/msensor/sensor*/port_name'):
with open(p) as f:
value = f.read().strip()
if (value == 'in' + str(port)):
self.sys_path = os.path.dirname(p)
sensor_existing = True
break
if (len(glob.glob('/sys/class/msensor/sensor*/type_id')) >0 and type_id > 0 and port == -1):
for p in glob.glob('/sys/class/msensor/sensor*/type_id'):
with open(p) as f:
value = int(f.read().strip())
if (value == type_id):
self.sys_path = os.path.dirname(p)
self.port = int(self.port_name[2:])
sensor_existing = True
break
if (len(glob.glob('/sys/class/msensor/sensor*/name')) >0 and name !=None and port == -1):
for p in glob.glob('/sys/class/msensor/sensor*/name'):
with open(p) as f:
value = f.read().strip()
if (name in value):
self.sys_path = os.path.dirname(p)
self.port = int(self.port_name[2:])
sensor_existing = True
break
if (not sensor_existing):
raise NoSuchSensorError(port, type_id, name)
self._mode = self.read_value('mode')
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if (self._mode != value):
self._mode = value
self.write_value('mode', value)
def mode_force_flush(self, value):
self._mode = value
self.write_value('mode', value)
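# Illustrative sketch (assumption, not part of the original module): reading the
# first value of a sensor plugged into input port 1; the mode string is device
# dependent and only a placeholder here.
#   s = Msensor(port=1)
#   s.mode = 'TOUCH'
#   print(s.value0)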
class Enum(object):
def __init__(self, *args, **kwargs):
for arg in args:
kwargs[arg] = arg
self.enum_dict = kwargs
def __getattr__(self, name):
if (name in self.enum_dict.keys()):
return self.enum_dict[name]
else:
raise NameError("no such item %s" % name)
@create_ev3_property(
duty_cycle={'read_only': True, 'property_type': Ev3IntType},
duty_cycle_sp={'read_only': False, 'property_type': Ev3IntType},
estop={'read_only': False, 'property_type': Ev3IntType},
polarity_mode={'read_only': False},
port_name={'read_only': True},
position={'read_only': False, 'property_type': Ev3IntType},
position_mode={'read_only': False},
position_sp={'read_only': False, 'property_type': Ev3IntType},
pulses_per_second={'read_only': True, 'property_type': Ev3IntType},
pulses_per_second_sp={'read_only': False, 'property_type': Ev3IntType},
ramp_down_sp={'read_only': False, 'property_type': Ev3IntType},
ramp_up_sp={'read_only': False, 'property_type': Ev3IntType},
regulation_mode={'read_only': False, 'property_type': Ev3OnOffType},
#reset={ 'read_only': False},
run={'read_only': False, 'property_type': Ev3BoolType},
run_mode={'read_only': False},
speed_regulation_D={'read_only': False, 'property_type': Ev3IntType},
speed_regulation_I={'read_only': False, 'property_type': Ev3IntType},
speed_regulation_K={'read_only': False, 'property_type': Ev3IntType},
speed_regulation_P={'read_only': False, 'property_type': Ev3IntType},
state={'read_only': True},
stop_mode={'read_only': False},
stop_modes={'read_only': False},
time_sp={'read_only': False, 'property_type': Ev3IntType},
type={'read_only': False},
uevent={'read_only': True}
)
class Motor(Ev3Dev):
STOP_MODE = Enum(COAST='coast', BRAKE='brake', HOLD='hold')
POSITION_MODE = Enum(RELATIVE='relative', ABSOLUTE='absolute')
PORT = Enum('A', 'B', 'C', 'D')
def __init__(self, port='', _type=''):
Ev3Dev.__init__(self)
motor_existing = False
searchpath='/sys/class/tacho-motor/motor*/'
if (len(glob.glob(searchpath + "*"))==0):
searchpath='/sys/class/tacho-motor/tacho-motor*/'
if (port != ''):
self.port = port
for p in glob.glob(searchpath + 'port_name'):
with open(p) as f:
value = f.read().strip()
if (value.lower() == ('out' + port).lower()):
self.sys_path = os.path.dirname(p)
motor_existing = True
break
if (_type != '' and port == ''):
for p in glob.glob(searchpath + 'type'):
with open(p) as f:
value = f.read().strip()
if (value.lower() == _type.lower()):
self.sys_path = os.path.dirname(p)
self.port = self.port_name[3:]
motor_existing = True
break
if (not motor_existing):
raise NoSuchMotorError(port, _type)
def stop(self):
self.run = False
def start(self):
self.run = True
def reset(self):
self.write_value('reset', 1)
def run_forever(self, speed_sp, **kwargs):
self.run_mode = 'forever'
for k in kwargs:
v = kwargs[k]
if (v != None):
setattr(self, k, v)
regulation_mode = self.regulation_mode
if (regulation_mode):
self.pulses_per_second_sp = speed_sp
else:
self.duty_cycle_sp = speed_sp
self.start()
def run_time_limited(self, time_sp, speed_sp, **kwargs):
self.run_mode = 'time'
for k in kwargs:
v = kwargs[k]
if (v != None):
setattr(self, k, v)
regulation_mode = self.regulation_mode
if (regulation_mode):
self.pulses_per_second_sp = speed_sp
else:
self.duty_cycle_sp = speed_sp
self.time_sp = time_sp
self.start()
def run_position_limited(self, position_sp, speed_sp, **kwargs):
self.run_mode = 'position'
kwargs['regulation_mode'] = True
for k in kwargs:
v = kwargs[k]
if (v != None):
setattr(self, k, v)
self.pulses_per_second_sp = speed_sp
self.position_sp = position_sp
self.start()
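# Illustrative sketch (assumption, not part of the original module): driving the
# motor on output port A with the helpers defined above.
#   m = Motor(port=Motor.PORT.A)
#   m.reset()
#   m.run_forever(75, regulation_mode=False)  # 75% duty cycle
#   ...                                       # do something while it spins
#   m.stop()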
def I2CSMBusProxy(cls):
try:
from smbus import SMBus
smbus_proxied_methods = [
m for m in dir(SMBus) if (m.startswith('read') or m.startswith('write'))]
for m in smbus_proxied_methods:
def create_proxied_smb_method(method):
def proxied_smb_method(self, *args, **kwargs):
return getattr(self.b, method)(self.addr, *args, **kwargs)
return proxied_smb_method
setattr(cls, m, create_proxied_smb_method(m))
return cls
except ImportError:
warnings.warn('python-smbus binding not found!')
return cls
@I2CSMBusProxy
class I2CS(object):
def __init__(self, port, addr):
self.port = port
self.i2c_port = port + 2
self.sys_path = '/dev/i2c-%s' % self.i2c_port
if (not os.path.exists(self.sys_path)):
raise NoSuchSensorError(port)
try:
from smbus import SMBus
self.b = SMBus(self.i2c_port)
self.addr = addr
except ImportError:
raise NoSuchLibraryError('smbus')
def read_byte_array(self, reg, _len):
return [self.read_byte_data(reg + r) for r in range(_len)]
def read_byte_array_as_string(self, reg, _len):
return ''.join(chr(r) for r in self.read_byte_array(reg, _len))
class create_i2c_property(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
for name, reg_address_and_read_only in self.kwargs.items():
def i2c_property(reg, read_only=True):
def fget(self):
return self.read_byte_data(reg)
def fset(self, value):
return self.write_byte_data(reg, value)
return property(fget, None if read_only else fset)
if (type(reg_address_and_read_only) == int):
prop = i2c_property(reg_address_and_read_only)
else:
prop = i2c_property(
reg_address_and_read_only[0], **reg_address_and_read_only[1])
setattr(cls, name, prop)
return cls
@create_ev3_property(
brightness={'read_only': False, 'property_type': Ev3IntType},
trigger={'read_only': False},
delay_on={'read_only': False, 'property_type': Ev3IntType},
delay_off={'read_only': False, 'property_type': Ev3IntType}
)
class LEDLight(Ev3Dev):
def __init__(self, light_path):
        super(LEDLight, self).__init__()
self.sys_path = '/sys/class/leds/' + light_path
class LEDSide (object):
def __init__(self, left_or_right):
self.green = LEDLight('ev3:green:%s' % left_or_right)
self.red = LEDLight('ev3:red:%s' % left_or_right)
self._color = 0
@property
def color(self):
return self._color
@color.setter
def color(self, value):
self.red.brightness = value & 0x01
self.green.brightness = (value >> 1) & 0x01
self._color = value
def get_operation_lights(self):
lights = []
if (self._color & 0x01):
lights.append(self.red)
if ((self._color >> 1) & 0x01):
lights.append(self.green)
return lights
def blink(self, color=0, **kwargs):
if (color != 0):
self.color = color
lights = self.get_operation_lights()
for light in lights:
light.trigger = 'timer'
for p, v in kwargs.items():
setattr(light, p, v)
def on(self):
lights = self.get_operation_lights()
for light in lights:
light.trigger = 'none'
light.brightness = 1
def off(self):
lights = self.get_operation_lights()
for light in lights:
light.trigger = 'none'
light.brightness = 0
class LED(object):
class COLOR:
RED = 1
GREEN = 2
AMBER = 3
left = LEDSide('left')
right = LEDSide('right')
@create_ev3_property(
tone={'read_only': False},
mode={'read_only': True},
volume={'read_only': False, 'property_type': Ev3IntType}
)
class Tone(Ev3Dev):
def __init__(self):
        super(Tone, self).__init__()
self.sys_path = '/sys/devices/platform/snd-legoev3'
def play(self, frequency, milliseconds=1000):
self.tone = '%d %d' % (frequency, milliseconds)
def stop(self):
self.tone = '0'
import os
class Lcd(object):
def __init__(self):
try:
from PIL import Image, ImageDraw
SCREEN_WIDTH = 178
SCREEN_HEIGHT = 128
HW_MEM_WIDTH = int((SCREEN_WIDTH + 31) / 32) * 4
SCREEN_MEM_WIDTH = int((SCREEN_WIDTH + 7) / 8)
LCD_BUFFER_LENGTH = SCREEN_MEM_WIDTH * SCREEN_HEIGHT
LCD_HW_BUFFER_LENGTH = HW_MEM_WIDTH * SCREEN_HEIGHT
self._buffer = Image.new(
"1", (HW_MEM_WIDTH * 8, SCREEN_HEIGHT), "white")
self._draw = ImageDraw.Draw(self._buffer)
except ImportError:
raise NoSuchLibraryError('PIL')
def update(self):
f = os.open('/dev/fb0', os.O_RDWR)
os.write(f, self._buffer.tobytes("raw", "1;IR"))
os.close(f)
@property
def buffer(self):
return self._buffer
@property
def draw(self):
return self._draw
def reset(self):
self._draw.rectangle(
(0, 0) + self._buffer.size, outline='white', fill='white')
class attach_ev3_keys(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __call__(self, cls):
key_const = {}
for key_name, key_code in self.kwargs.items():
def attach_key(key_name, key_code):
def fget(self):
buf = self.polling()
return self.test_bit(key_code, buf)
return property(fget)
setattr(cls, key_name, attach_key(key_name, key_code))
key_const[key_name.upper()] = key_code
setattr(cls, 'CODE', Enum(**key_const))
return cls
import array
import fcntl
@attach_ev3_keys(
up=103,
down=108,
left=105,
right=106,
enter=28,
backspace=14
)
class Key(object):
def __init__(self):
pass
def EVIOCGKEY(self, length):
return 2 << (14 + 8 + 8) | length << (8 + 8) | ord('E') << 8 | 0x18
def test_bit(self, bit, bytes):
# bit in bytes is 1 when released and 0 when pressed
return not bool(bytes[int(bit / 8)] & 1 << bit % 8)
def polling(self):
KEY_MAX = 0x2ff
BUF_LEN = int((KEY_MAX + 7) / 8)
buf = array.array('B', [0] * BUF_LEN)
with open('/dev/input/by-path/platform-gpio-keys.0-event', 'r') as fd:
ret = fcntl.ioctl(fd, self.EVIOCGKEY(len(buf)), buf)
if (ret < 0):
return None
else:
return buf
|
dan-ionut-fechete/ev3
|
ev3/ev3dev.py
|
Python
|
apache-2.0
| 19,250
|
[
"Amber"
] |
1a5df3d008a4adacea8c0ea5dd702e88a2d879eea345071fff3d4acde069e432
|
"""
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# Nothing alliance specific in here as far as I can tell.
# qebab, 24/6/08.
import re
from munin import loadable
class intel(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 50)
self.paramre = re.compile(r"^\s*(.*)")
self.commentre = re.compile(r"comment=", flags=re.IGNORECASE)
self.usage = self.__class__.__name__ + " <x:y:z> [option=value]+"
self.planet_coordre = re.compile(r"(\d+)[. :-](\d+)[. :-](\d+)(.*)")
self.gal_coordre = re.compile(r"(\d+)[. :-](\d+)")
self.options = [
"alliance",
"nick",
"fakenick",
"defwhore",
"covop",
"scanner",
"distwhore",
"bg",
"gov",
"relay",
"reportchan",
"comment",
]
self.nulls = ["<>", ".", "-", "?", ""]
self.true = ["1", "yes", "y", "true", "t"]
self.false = ["0", "no", "n", "false", "f", ""]
self.helptext = ["Valid options: %s" % (", ".join(self.options))]
def execute(self, user, access, irc_msg):
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 1
if access < self.level:
irc_msg.reply("You do not have enough access to use this command")
return 1
par = m.group(1)
m = self.planet_coordre.search(par)
if not m:
m = self.gal_coordre.search(par)
if m:
return self.exec_gal(irc_msg, m.group(1), m.group(2))
else:
irc_msg.reply("Usage: %s" % (self.usage,))
return 1
p = loadable.planet(x=m.group(1), y=m.group(2), z=m.group(3))
params = m.group(4)
if not p.load_most_recent(self.cursor, irc_msg.round):
irc_msg.reply("No planet matching '%s:%s:%s' found" % (p.x, p.y, p.z,))
return 1
i = loadable.intel(pid=p.id)
if not i.load_from_db(self.cursor, irc_msg.round):
pass
opts = self.split_opts(params)
opts["pid"] = p.id
a = loadable.alliance(name=i.alliance)
if i.alliance:
a.load_most_recent(self.cursor, irc_msg.round)
for opt, val in list(opts.items()):
if opt == "alliance":
if val in self.nulls:
a = loadable.alliance(id=None)
continue
a = loadable.alliance(name=val)
if not a.load_most_recent(self.cursor, irc_msg.round):
irc_msg.reply(
"'%s' is not a valid alliance, your information was not added."
% (val,)
)
return 1
else:
opts["alliance"] = a.name
if (opt in self.options) and (val in self.nulls):
opts[opt] = None
continue
if opt in ("nick", "fakenick", "bg", "gov", "reportchan"):
opts[opt] = val
if opt in ("defwhore", "covop", "scanner", "distwhore", "relay"):
if val in self.true:
opts[opt] = True
if val in self.false:
opts[opt] = False
if opt == "comment":
opts[opt] = self.commentre.split(irc_msg.command)[1]
for k in self.options:
if k not in opts:
opts[k] = getattr(i, k)
if i.id:
query = "UPDATE intel SET "
query += "pid=%s,nick=%s,fakenick=%s,defwhore=%s,gov=%s,bg=%s,covop=%s,alliance_id=%s,relay=%s,reportchan=%s,"
query += "scanner=%s,distwhore=%s,comment=%s"
query += " WHERE id=%s"
self.cursor.execute(
query,
(
opts["pid"],
opts["nick"],
opts["fakenick"],
opts["defwhore"],
opts["gov"],
opts["bg"],
opts["covop"],
a.id,
opts["relay"],
opts["reportchan"],
opts["scanner"],
opts["distwhore"],
opts["comment"],
i.id,
),
)
elif params:
query = "INSERT INTO intel (round,pid,nick,fakenick,defwhore,gov,bg,covop,relay,reportchan,scanner,distwhore,comment,alliance_id) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
self.cursor.execute(
query,
(
irc_msg.round,
opts["pid"],
opts["nick"],
opts["fakenick"],
opts["defwhore"],
opts["gov"],
opts["bg"],
opts["covop"],
opts["relay"],
opts["reportchan"],
opts["scanner"],
opts["distwhore"],
opts["comment"],
a.id,
),
)
i = loadable.intel(
pid=opts["pid"],
nick=opts["nick"],
fakenick=opts["fakenick"],
defwhore=opts["defwhore"],
gov=opts["gov"],
bg=opts["bg"],
covop=opts["covop"],
alliance=opts["alliance"],
relay=opts["relay"],
reportchan=opts["reportchan"],
scanner=opts["scanner"],
distwhore=opts["distwhore"],
comment=opts["comment"],
)
reply = "Information stored for %s:%s:%s - " % (p.x, p.y, p.z)
reply += i.__str__()
irc_msg.reply(reply)
return 1
def split_opts(self, params):
param_dict = {}
for s in params.split():
a = s.split("=")
if len(a) != 2:
continue
param_dict[a[0].lower()] = a[1]
return param_dict
def exec_gal(self, irc_msg, x, y):
query = "SELECT i.id AS id, p.id AS pid, p.x AS x, p.y AS y, p.z AS z, i.nick AS nick, i.fakenick AS fakenick, i.defwhore AS defwhore, i.gov AS gov, i.bg AS bg, i.covop AS covop, i.alliance_id AS alliance_id, i.relay AS relay, i.reportchan AS reportchan, i.scanner AS scanner, i.distwhore AS distwhore, i.comment AS comment, a.name AS alliance"
query += " FROM planet_dump AS p, intel AS i"
query += " LEFT JOIN alliance_canon AS a ON i.alliance_id=a.id"
query += " WHERE tick=(SELECT max_tick(%s::smallint)) AND p.round=%s AND p.id=i.pid AND x=%s AND y=%s"
query += " ORDER BY z ASC"
self.cursor.execute(query, (irc_msg.round, irc_msg.round, x, y,))
replied_to_request = False
repls = []
for d in self.cursor.fetchall():
x = d["x"]
y = d["y"]
z = d["z"]
i = loadable.intel(
pid=d["pid"],
nick=d["nick"],
fakenick=d["fakenick"],
defwhore=d["defwhore"],
gov=d["gov"],
bg=d["bg"],
covop=d["covop"],
alliance=d["alliance"],
relay=d["relay"],
reportchan=d["reportchan"],
scanner=d["scanner"],
distwhore=d["distwhore"],
comment=d["comment"],
)
if i.nick or i.alliance:
replied_to_request = True
r = "#%d " % (z,)
if i.nick and i.alliance:
r += "%s [%s]" % (i.nick, i.alliance[:3])
elif i.nick:
r += i.nick
elif i.alliance:
r += "[" + i.alliance[:3] + "]"
repls.append(r)
if not replied_to_request:
irc_msg.reply("No information stored for galaxy %s:%s" % (x, y))
else:
reply = "Intel %d:%d - " % (x, y)
reply += self.gal_info(x, y, irc_msg.round)
reply += " - "
reply += " - ".join(repls)
irc_msg.reply(reply)
return 1
def gal_info(self, x, y, round):
g = loadable.galaxy(x=x, y=y)
g.load_most_recent(self.cursor, round)
return "Score (%d) Value (%d) Size (%d)" % (
g.score_rank,
g.value_rank,
g.size_rank,
)
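    # Example (illustrative, not part of the original module) of the command
    # syntax this plugin parses:
    #   intel 1:2:3 nick=SomePlayer alliance=FooBar defwhore=yes comment=watch this one
    # split_opts() turns the option=value pairs into
    #   {'nick': 'SomePlayer', 'alliance': 'FooBar', 'defwhore': 'yes', ...}
    # while the comment= option keeps everything after 'comment=' verbatim.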
|
munin/munin
|
munin/mod/intel.py
|
Python
|
gpl-2.0
| 9,500
|
[
"Galaxy"
] |
527f3f388596406d68311a4b656184b7ac54403ad11ff82d003418c456904960
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the main LMS Dashboard (aka, Student Dashboard).
"""
import datetime
from nose.plugins.attrib import attr
from common.test.acceptance.tests.helpers import UniqueCourseTest, generate_course_key
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
DEFAULT_SHORT_DATE_FORMAT = "%b %d, %Y"
DEFAULT_DAY_AND_TIME_FORMAT = "%A at %-I%P"
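# Note: '%-I' (hour without zero padding) and '%P' (lower-case am/pm) are glibc
# extensions to strftime, so these formats assume the tests run on a Linux host.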
class BaseLmsDashboardTest(UniqueCourseTest):
""" Base test suite for the LMS Student Dashboard """
def setUp(self):
"""
Initializes the components (page objects, courses, users) for this test suite
"""
# Some parameters are provided by the parent setUp() routine, such as the following:
# self.course_id, self.course_info, self.unique_id
super(BaseLmsDashboardTest, self).setUp()
# Load page objects for use by the tests
self.dashboard_page = DashboardPage(self.browser)
# Configure some aspects of the test course and install the settings into the course
self.course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"],
)
self.course_fixture.add_advanced_settings({
u"social_sharing_url": {u"value": "http://custom/course/url"}
})
self.course_fixture.install()
self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
self.email = "{user}@example.com".format(user=self.username)
# Create the test user, register them for the course, and authenticate
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
course_id=self.course_id
).visit()
# Navigate the authenticated, enrolled user to the dashboard page and get testing!
self.dashboard_page.visit()
class BaseLmsDashboardTestMultiple(UniqueCourseTest):
""" Base test suite for the LMS Student Dashboard with Multiple Courses"""
def setUp(self):
"""
Initializes the components (page objects, courses, users) for this test suite
"""
# Some parameters are provided by the parent setUp() routine, such as the following:
# self.course_id, self.course_info, self.unique_id
super(BaseLmsDashboardTestMultiple, self).setUp()
# Load page objects for use by the tests
self.dashboard_page = DashboardPage(self.browser)
# Configure some aspects of the test course and install the settings into the course
self.courses = {
'A': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_A',
'display_name': 'Test Course A'
},
'B': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_B',
'display_name': 'Test Course B'
},
'C': {
'org': 'test_org',
'number': self.unique_id,
'run': 'test_run_C',
'display_name': 'Test Course C'
}
}
self.username = "test_{uuid}".format(uuid=self.unique_id[0:6])
self.email = "{user}@example.com".format(user=self.username)
self.course_keys = {}
self.course_fixtures = {}
for key, value in self.courses.iteritems():
course_key = generate_course_key(
value['org'],
value['number'],
value['run'],
)
course_fixture = CourseFixture(
value['org'],
value['number'],
value['run'],
value['display_name'],
)
course_fixture.add_advanced_settings({
u"social_sharing_url": {u"value": "http://custom/course/url"}
})
course_fixture.install()
self.course_keys[key] = course_key
self.course_fixtures[key] = course_fixture
# Create the test user, register them for the course, and authenticate
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
course_id=course_key
).visit()
# Navigate the authenticated, enrolled user to the dashboard page and get testing!
self.dashboard_page.visit()
class LmsDashboardPageTest(BaseLmsDashboardTest):
""" Test suite for the LMS Student Dashboard page """
def setUp(self):
super(LmsDashboardPageTest, self).setUp()
# now datetime for usage in tests
self.now = datetime.datetime.now()
def test_dashboard_course_listings(self):
"""
Perform a general validation of the course listings section
"""
course_listings = self.dashboard_page.get_course_listings()
self.assertEqual(len(course_listings), 1)
def test_dashboard_social_sharing_feature(self):
"""
Validate the behavior of the social sharing feature
"""
twitter_widget = self.dashboard_page.get_course_social_sharing_widget('twitter')
twitter_url = "https://twitter.com/intent/tweet?text=Testing+feature%3A%20http%3A%2F%2Fcustom%2Fcourse%2Furl"
self.assertEqual(twitter_widget.attrs('title')[0], 'Share on Twitter')
self.assertEqual(twitter_widget.attrs('data-tooltip')[0], 'Share on Twitter')
self.assertEqual(twitter_widget.attrs('aria-haspopup')[0], 'true')
self.assertEqual(twitter_widget.attrs('aria-expanded')[0], 'false')
self.assertEqual(twitter_widget.attrs('target')[0], '_blank')
self.assertIn(twitter_url, twitter_widget.attrs('href')[0])
self.assertIn(twitter_url, twitter_widget.attrs('onclick')[0])
facebook_widget = self.dashboard_page.get_course_social_sharing_widget('facebook')
facebook_url = "https://www.facebook.com/sharer/sharer.php?u=http%3A%2F%2Fcustom%2Fcourse%2Furl"
self.assertEqual(facebook_widget.attrs('title')[0], 'Share on Facebook')
self.assertEqual(facebook_widget.attrs('data-tooltip')[0], 'Share on Facebook')
self.assertEqual(facebook_widget.attrs('aria-haspopup')[0], 'true')
self.assertEqual(facebook_widget.attrs('aria-expanded')[0], 'false')
self.assertEqual(facebook_widget.attrs('target')[0], '_blank')
self.assertIn(facebook_url, facebook_widget.attrs('href')[0])
self.assertIn(facebook_url, facebook_widget.attrs('onclick')[0])
def test_ended_course_date(self):
"""
Scenario:
Course Date should have the format 'Ended - Sep 23, 2015'
if the course on student dashboard has ended.
As a Student,
Given that I have enrolled to a course
And the course has ended in the past
When I visit dashboard page
Then the course date should have the following format "Ended - %b %d, %Y" e.g. "Ended - Sep 23, 2015"
"""
course_start_date = datetime.datetime(1970, 1, 1)
course_end_date = self.now - datetime.timedelta(days=90)
self.course_fixture.add_course_details({
'start_date': course_start_date,
'end_date': course_end_date
})
self.course_fixture.configure_course()
end_date = course_end_date.strftime(DEFAULT_SHORT_DATE_FORMAT)
expected_course_date = "Ended - {end_date}".format(end_date=end_date)
# reload the page for changes to course date changes to appear in dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'ended' message is displayed if a course has already ended
self.assertEqual(course_date, expected_course_date)
def test_running_course_date(self):
"""
Scenario:
Course Date should have the format 'Started - Sep 23, 2015'
if the course on student dashboard is running.
As a Student,
Given that I have enrolled to a course
And the course has started
And the course is in progress
When I visit dashboard page
Then the course date should have the following format "Started - %b %d, %Y" e.g. "Started - Sep 23, 2015"
"""
course_start_date = datetime.datetime(1970, 1, 1)
course_end_date = self.now + datetime.timedelta(days=90)
self.course_fixture.add_course_details({
'start_date': course_start_date,
'end_date': course_end_date
})
self.course_fixture.configure_course()
start_date = course_start_date.strftime(DEFAULT_SHORT_DATE_FORMAT)
expected_course_date = "Started - {start_date}".format(start_date=start_date)
# reload the page for changes to course date changes to appear in dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'started' message is displayed if a course is in running state
self.assertEqual(course_date, expected_course_date)
def test_future_course_date(self):
"""
Scenario:
Course Date should have the format 'Starts - Sep 23, 2015'
if the course on student dashboard starts in future.
As a Student,
Given that I have enrolled to a course
And the course starts in future
And the course does not start within 5 days
When I visit dashboard page
Then the course date should have the following format "Starts - %b %d, %Y" e.g. "Starts - Sep 23, 2015"
"""
course_start_date = self.now + datetime.timedelta(days=30)
course_end_date = self.now + datetime.timedelta(days=365)
self.course_fixture.add_course_details({
'start_date': course_start_date,
'end_date': course_end_date
})
self.course_fixture.configure_course()
start_date = course_start_date.strftime(DEFAULT_SHORT_DATE_FORMAT)
expected_course_date = "Starts - {start_date}".format(start_date=start_date)
# reload the page for changes to course date changes to appear in dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'starts' message is displayed if a course is about to start in future,
# and course does not start within 5 days
self.assertEqual(course_date, expected_course_date)
def test_near_future_course_date(self):
"""
Scenario:
Course Date should have the format 'Starts - Wednesday at 5am UTC'
if the course on student dashboard starts within 5 days.
As a Student,
Given that I have enrolled to a course
And the course starts within 5 days
When I visit dashboard page
Then the course date should have the following format "Starts - %A at %-I%P UTC"
e.g. "Starts - Wednesday at 5am UTC"
"""
course_start_date = self.now + datetime.timedelta(days=2)
course_end_date = self.now + datetime.timedelta(days=365)
self.course_fixture.add_course_details({
'start_date': course_start_date,
'end_date': course_end_date
})
self.course_fixture.configure_course()
start_date = course_start_date.strftime(DEFAULT_DAY_AND_TIME_FORMAT)
expected_course_date = "Starts - {start_date} UTC".format(start_date=start_date)
# reload the page for changes to course date changes to appear in dashboard
self.dashboard_page.visit()
course_date = self.dashboard_page.get_course_date()
# Test that proper course date with 'starts' message is displayed if a course is about to start in future,
# and course starts within 5 days
self.assertEqual(course_date, expected_course_date)
@attr('a11y')
class LmsDashboardA11yTest(BaseLmsDashboardTestMultiple):
"""
Class to test lms student dashboard accessibility.
"""
def test_dashboard_course_listings_a11y(self):
"""
Test the accessibility of the course listings
"""
course_listings = self.dashboard_page.get_courses()
self.assertEqual(len(course_listings), 3)
self.dashboard_page.a11y_audit.check_for_accessibility_errors()
|
itsjeyd/edx-platform
|
common/test/acceptance/tests/lms/test_lms_dashboard.py
|
Python
|
agpl-3.0
| 12,711
|
[
"VisIt"
] |
8d78cff8b1baf603e8f9eeafcf4f15f85d94c8f717319a905b0770fba9068c5d
|
import sys
import six
import numpy as np
from json import dump, load
from contextlib import contextmanager
from collections import Counter, defaultdict
from subprocess import CalledProcessError
from pysam import (
AlignmentFile, CMATCH, CINS, CDEL, CREF_SKIP, CSOFT_CLIP, CHARD_CLIP, CPAD,
CEQUAL, CDIFF)
from dark.process import Executor
from dark.fasta import FastaReads
from dark.reads import Read, DNARead
class SamError(Exception):
"A SAM error."
class UnequalReferenceLengthError(SamError):
"The references of interest in a SAM/BAM file are not of the same length."
class UnknownReference(SamError):
"Reference sequence not found in SAM/BAM file."
class UnspecifiedReference(SamError):
"Reference sequence not specified."
class ReferenceNameMismatchError(SamError):
"Reference name mismatch."
class InvalidSAM(SamError):
"SAM/BAM file has unexpected/invalid content."
# From https://samtools.github.io/hts-specs/SAMv1.pdf
CONSUMES_QUERY = {CMATCH, CINS, CSOFT_CLIP, CEQUAL, CDIFF}
CONSUMES_REFERENCE = {CMATCH, CDEL, CREF_SKIP, CEQUAL, CDIFF}
@contextmanager
def samfile(filename):
"""
A context manager to open and close a SAM/BAM file.
@param filename: A C{str} file name to open.
"""
f = AlignmentFile(filename)
yield f
f.close()
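# Illustrative usage (not part of the original module) of the context manager
# defined above:
#   with samfile('alignments.bam') as sam:
#       for read in sam.fetch(until_eof=True):
#           print(read.query_name)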
def samtoolsInstalled():
"""
Test if samtools is installed.
    @return: A C{bool}, which is C{True} if samtools seems to be installed.
"""
try:
Executor().execute('samtools help')
except CalledProcessError:
return False
else:
return True
def samReferences(filenameOrSamfile):
"""
List SAM/BAM file reference names.
@param filenameOrSamfile: Either a C{str} SAM/BAM file name or an
instance of C{pysam.AlignmentFile}.
@return: A C{list} of C{str} reference names from the SAM file.
"""
def _references(sam):
return [sam.get_reference_name(i) for i in range(sam.nreferences)]
if isinstance(filenameOrSamfile, six.string_types):
with samfile(filenameOrSamfile) as sam:
return _references(sam)
else:
return _references(filenameOrSamfile)
def samReferencesToStr(filenameOrSamfile, indent=0):
"""
List SAM/BAM file reference names and lengths.
@param filenameOrSamfile: Either a C{str} SAM/BAM file name or an
instance of C{pysam.AlignmentFile}.
@param indent: An C{int} number of spaces to indent each line.
@return: A C{str} describing known reference names and their lengths.
"""
indent = ' ' * indent
def _references(sam):
result = []
for i in range(sam.nreferences):
result.append('%s%s (length %d)' % (
indent, sam.get_reference_name(i), sam.lengths[i]))
return '\n'.join(result)
if isinstance(filenameOrSamfile, six.string_types):
with samfile(filenameOrSamfile) as sam:
return _references(sam)
else:
        return _references(filenameOrSamfile)
def _hardClip(sequence, quality, cigartuples):
"""
Hard clip (if necessary) a sequence.
@param sequence: A C{str} nucleotide sequence.
@param quality: A C{str} quality string, or a C{list} of C{int} quality
values as returned by pysam, or C{None} if the SAM file had a '*'
for the quality string (which pysam converts to C{None}).
@param cigartuples: An iterable of (operation, length) tuples, detailing
the alignment, as per the SAM specification.
@return: A 3-tuple consisting of
1) a hard-clipped C{str} sequence if hard-clipping is indicated by
the CIGAR operations.
2) a hard-clipped quality C{str} or C{list} (depending on what
type we were passed) if hard-clipping is indicated by the CIGAR
operations.
3) a Boolean, C{True} if hard clipping was performed by this
function or C{False} if the hard clipping had already been
done.
"""
hardClipCount = cigarLength = 0
for (operation, length) in cigartuples:
hardClipCount += operation == CHARD_CLIP
cigarLength += length if operation in CONSUMES_QUERY else 0
sequenceLength = len(sequence)
if quality is not None:
assert sequenceLength == len(quality)
clipLeft = clipRight = 0
clippedSequence = sequence
clippedQuality = quality
if sequenceLength > cigarLength:
alreadyClipped = False
else:
assert sequenceLength == cigarLength
alreadyClipped = True
if hardClipCount == 0:
pass
elif hardClipCount == 1:
# Hard clip either at the start or the end.
if cigartuples[0][0] == CHARD_CLIP:
if not alreadyClipped:
clipLeft = cigartuples[0][1]
clippedSequence = sequence[clipLeft:]
if quality is not None:
clippedQuality = quality[clipLeft:]
elif cigartuples[-1][0] == CHARD_CLIP:
if not alreadyClipped:
clipRight = cigartuples[-1][1]
clippedSequence = sequence[:-clipRight]
if quality is not None:
clippedQuality = quality[:-clipRight]
else:
raise ValueError(
'Invalid CIGAR tuples (%s) contains hard-clipping operation '
'that is neither at the start nor the end of the sequence.' %
(cigartuples,))
elif hardClipCount == 2:
# Hard clip at both the start and end.
assert cigartuples[0][0] == cigartuples[-1][0] == CHARD_CLIP
if not alreadyClipped:
clipLeft, clipRight = cigartuples[0][1], cigartuples[-1][1]
clippedSequence = sequence[clipLeft:-clipRight]
if quality is not None:
clippedQuality = quality[clipLeft:-clipRight]
else:
raise ValueError(
'Invalid CIGAR tuples (%s) specifies hard-clipping %d times (2 '
'is the maximum).' % (cigartuples, hardClipCount))
weClipped = bool(clipLeft or clipRight)
if weClipped:
assert not alreadyClipped
if len(clippedSequence) + clipLeft + clipRight != sequenceLength:
raise ValueError(
'Sequence %r (length %d) clipped to %r (length %d), but the '
'difference between these two lengths (%d) is not equal to '
'the sum (%d) of the left and right clip lengths (%d and %d '
'respectively). CIGAR tuples: %s' %
(sequence, len(sequence),
clippedSequence, len(clippedSequence),
abs(len(sequence) - len(clippedSequence)),
clipLeft + clipRight, clipLeft, clipRight, cigartuples))
else:
assert len(clippedSequence) == sequenceLength
if quality is not None:
assert len(clippedQuality) == sequenceLength
return clippedSequence, clippedQuality, weClipped
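# Worked example (illustrative, not part of the original module): with a CIGAR
# of 2H3M the first two bases are removed and the returned flag reports that
# the clipping was done here.
#   _hardClip('ACGTT', [30, 30, 30, 30, 30], [(CHARD_CLIP, 2), (CMATCH, 3)])
#   --> ('GTT', [30, 30, 30], True)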
class SAMFilter(object):
"""
Filter a SAM/BAM file.
@param filename: The C{str} name of a BAM/SAM file to filter.
@param filterRead: A function that takes a C{dark.reads.Read) instance
and returns either C{None} or a C{Read} instance, according to
whether the passed read should be omitted or not. If C{None} is
passed, no filtering on reads (i.e., queries) is done.
@param referenceIds: Either C{None} or a set of C{str} reference ids
that should be kept (other references will be dropped).
@param storeQueryIds: If C{True}, query ids will be stored (in
C{self.queryIds}) as the SAM/BAM file is read.
@param dropUnmapped: If C{True}, unmapped matches will be excluded.
@param dropSecondary: If C{True}, secondary matches will be excluded.
@param dropSupplementary: If C{True}, supplementary matches will be
excluded.
@param dropDuplicates: If C{True}, matches flagged as optical or PCR
duplicates will be excluded.
@param keepQCFailures: If C{True}, reads that are considered quality
control failures will be included.
@param minScore: If not C{None}, alignments with score tag values less
than this value will not be output. If given, alignments that do not
have a score will not be output.
@param maxScore: If not C{None}, alignments with score tag values greater
than this value will not be output. If given, alignments that do not
have a score will not be output.
@param scoreTag: The alignment tag to extract for minScore and maxScore
comparisons.
"""
def __init__(self, filename, filterRead=None, referenceIds=None,
storeQueryIds=False, dropUnmapped=False,
dropSecondary=False, dropSupplementary=False,
dropDuplicates=False, keepQCFailures=False, minScore=None,
maxScore=None, scoreTag='AS'):
self.filename = filename
self.filterRead = filterRead
self.referenceIds = referenceIds
self.storeQueryIds = storeQueryIds
self.dropUnmapped = dropUnmapped
self.dropSecondary = dropSecondary
self.dropSupplementary = dropSupplementary
self.dropDuplicates = dropDuplicates
self.keepQCFailures = keepQCFailures
self.minScore = minScore
self.maxScore = maxScore
self.scoreTag = scoreTag
# Detect when there are no filtering criteria, in which case
# self.filterAlignment can return immediately.
self.noFiltering = all((
filterRead is None, referenceIds is None,
not any((storeQueryIds, dropUnmapped, dropSecondary,
dropSupplementary, dropDuplicates, keepQCFailures)),
minScore is None, maxScore is None))
@staticmethod
def addFilteringOptions(parser, samfileIsPositional=False,
samfileAction='store', samfileRequired=True,
samfileNargs=None, referenceIdRequired=False):
"""
Add options to an argument parser for filtering SAM/BAM.
@param parser: An C{argparse.ArgumentParser} instance.
@param samfileIsPositional: If C{True} the SAM/BAM file must
be given as the final argument on the command line (without
being preceded by --samfile).
@param samfileAction: A C{str} action to take when 'samfile' arguments
are found on the command line. Pass 'append' to allow multiple SAM
files.
@param samfileRequired: If C{True}, --samfile must be given on the
command line. This is only relevant when samfileIsPositional is
C{False} because positional arguments are always required. It may
seem strange to allow no --samfile argument in a class that filters
            SAM files, but this option can be used in conjunction with scripts
that can optionally take a SAM file (e.g.,
genome-protein-summary.py).
@param samfileNargs: The value to pass for 'nargs' in adding the
samfile option.
@param referenceIdRequired: If C{True}, make the --referenceId option
required.
"""
if samfileIsPositional:
# Positional arguments are always required.
assert samfileRequired, ('samfileIsPositional is True, so '
'samfileRequired must also be True.')
if samfileNargs is None:
parser.add_argument(
'samfile', action=samfileAction,
help='The SAM/BAM file to filter.')
else:
parser.add_argument(
'samfile', action=samfileAction, nargs=samfileNargs,
help='The SAM/BAM file to filter.')
else:
if samfileNargs is None:
parser.add_argument(
'--samfile', required=samfileRequired,
action=samfileAction, help='The SAM/BAM file to filter.')
else:
parser.add_argument(
'--samfile', required=samfileRequired, nargs=samfileNargs,
action=samfileAction, help='The SAM/BAM file to filter.')
parser.add_argument(
'--referenceId', metavar='ID', action='append',
required=referenceIdRequired,
help=('A reference sequence id whose alignments should be kept '
'(alignments against other references will be dropped). '
'If omitted, alignments against all references will be '
'kept. May be repeated.'))
parser.add_argument(
'--dropUnmapped', default=False, action='store_true',
help='If given, unmapped matches will not be output.')
parser.add_argument(
'--dropSecondary', default=False, action='store_true',
help='If given, secondary matches will not be output.')
parser.add_argument(
'--dropSupplementary', default=False, action='store_true',
help='If given, supplementary matches will not be output.')
parser.add_argument(
'--dropDuplicates', default=False, action='store_true',
help=('If given, matches flagged as optical or PCR duplicates '
'will not be output.'))
parser.add_argument(
'--keepQCFailures', default=False, action='store_true',
help=('If given, reads that are considered quality control '
'failures will be included in the output.'))
parser.add_argument(
'--minScore', type=float, metavar='FLOAT',
help=('If given, alignments with --scoreTag (default AS) values '
'less than this value will not be output. If given, '
'alignments that do not have a score will not be output.'))
parser.add_argument(
'--maxScore', type=float, metavar='FLOAT',
help=('If given, alignments with --scoreTag (default AS) values '
'greater than this value will not be output. If given, '
'alignments that do not have a score will not be output.'))
parser.add_argument(
'--scoreTag', default='AS', metavar='TAG',
help=('The alignment tag to extract for --minScore and --maxScore '
'comparisons.'))
@classmethod
def parseFilteringOptions(cls, args, filterRead=None, storeQueryIds=False):
"""
        Parse command line options (as added by C{addFilteringOptions}).
@param args: The command line arguments, as returned by
C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
and returns C{None} if the read should be omitted in filtering
or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
SAM/BAM file is read.
@return: A C{SAMFilter} instance.
"""
return cls(
args.samfile,
filterRead=filterRead,
referenceIds=set(args.referenceId) if args.referenceId else None,
storeQueryIds=storeQueryIds,
dropUnmapped=args.dropUnmapped,
dropSecondary=args.dropSecondary,
dropSupplementary=args.dropSupplementary,
dropDuplicates=args.dropDuplicates,
keepQCFailures=args.keepQCFailures,
            minScore=args.minScore,
            maxScore=args.maxScore,
            scoreTag=args.scoreTag)
def filterAlignment(self, alignment):
"""
Test an alignment to see if it passes all filtering criteria.
@param alignment: A pysam alignment instance.
@return: A C{bool}, C{True} if the alignment passes our filtering,
C{False} if it should be discarded.
"""
if self.noFiltering:
return True
if self.minScore is not None or self.maxScore is not None:
try:
score = alignment.get_tag(self.scoreTag)
except KeyError:
return False
else:
if ((self.minScore is not None and score < self.minScore) or
(self.maxScore is not None and score > self.maxScore)):
return False
return ((self.filterRead is None or
self.filterRead(Read(alignment.query_name,
alignment.query_sequence,
alignment.qual))) and
not (
(self.referenceIds and
alignment.reference_name not in self.referenceIds) or
(alignment.is_unmapped and self.dropUnmapped) or
(alignment.is_secondary and self.dropSecondary) or
(alignment.is_supplementary and self.dropSupplementary) or
(alignment.is_duplicate and self.dropDuplicates) or
(alignment.is_qcfail and not self.keepQCFailures)))
def alignments(self):
"""
Get alignments from the SAM/BAM file, subject to filtering.
@return: A generator that yields pysam alignment instances that pass
our filtering criteria.
"""
if self.storeQueryIds:
queryIds = self.queryIds = set()
else:
queryIds = None
lastAlignment = None
count = 0
with samfile(self.filename) as samAlignment:
for count, alignment in enumerate(samAlignment.fetch(), start=1):
if queryIds is not None:
queryIds.add(alignment.query_name)
# Secondary and supplementary alignments may have a '*'
# (pysam returns this as None) SEQ field, indicating that
# the previous sequence should be used. This is best
# practice according to section 2.5.2 of
# https://samtools.github.io/hts-specs/SAMv1.pdf So we use
# the last alignment query and quality strings if we get
# None as a query sequence.
if alignment.query_sequence is None:
if lastAlignment is None:
raise InvalidSAM(
'pysam produced an alignment (number %d) with no '
'query sequence without previously giving an '
'alignment with a sequence.' % count)
# Use the previous query sequence and quality. I'm not
# making the call to _hardClip dependent on
# alignment.cigartuples (as in the else clause below)
# because I don't think it's possible for
# alignment.cigartuples to be None in this case. If we
# have a second match on a query, then it must be
# aligned to something (i.e., it cannot be unmapped
# with no CIGAR string). The assertion will tell us if
# this is ever not the case.
assert alignment.cigartuples
(alignment.query_sequence,
alignment.query_qualities, _) = _hardClip(
lastAlignment.query_sequence,
lastAlignment.query_qualities,
alignment.cigartuples)
else:
lastAlignment = alignment
if alignment.cigartuples:
(alignment.query_sequence,
alignment.query_qualities, _) = _hardClip(
alignment.query_sequence,
alignment.query_qualities,
alignment.cigartuples)
if self.filterAlignment(alignment):
yield alignment
self.alignmentCount = count
def referenceLengths(self):
"""
Get the lengths of wanted references.
@raise UnknownReference: If a reference id is not present in the
SAM/BAM file.
@return: A C{dict} of C{str} reference id to C{int} length with a key
for each reference id in C{self.referenceIds} or for all references
if C{self.referenceIds} is C{None}.
"""
result = {}
with samfile(self.filename) as sam:
if self.referenceIds:
for referenceId in self.referenceIds:
tid = sam.get_tid(referenceId)
if tid == -1:
raise UnknownReference(
'Reference %r is not present in the SAM/BAM file.'
% referenceId)
else:
result[referenceId] = sam.lengths[tid]
else:
result = dict(zip(sam.references, sam.lengths))
return result
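# A hedged usage sketch (added for illustration; not part of the original
# module): keep only primary, mapped alignments from a hypothetical
# 'example.bam' file and print their query names. The file name is an
# assumption.
def _exampleSAMFilterUsage():
    samFilter = SAMFilter('example.bam', dropUnmapped=True,
                          dropSecondary=True, dropSupplementary=True)
    for alignment in samFilter.alignments():
        print(alignment.query_name)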
class PaddedSAM(object):
"""
Obtain aligned (padded) queries from a SAM/BAM file.
@param samFilter: A C{SAMFilter} instance.
    @raises UnequalReferenceLengthError: If the reference sequence lengths
        in the SAM/BAM file are not all identical.
    @raises UnknownReference: If a reference id given to the C{SAMFilter}
        does not exist in the SAM/BAM file.
"""
def __init__(self, samFilter):
referenceLengths = samFilter.referenceLengths()
if len(set(referenceLengths.values())) != 1:
raise UnequalReferenceLengthError(
'Your %d SAM/BAM file reference sequence '
'lengths (%s) are not all identical.' % (
len(referenceLengths),
', '.join(
'%s=%d' % (id_, referenceLengths[id_])
for id_ in sorted(referenceLengths))))
# Get the length of any of the sequences (they are all identical).
self.referenceLength = referenceLengths.popitem()[1]
self.samFilter = samFilter
# self.referenceInsertions will be keyed by query id (the query
# that would cause a reference insertion). The values will be lists
# of 2-tuples, with each 2-tuple containing an offset into the
# reference sequence and the C{str} of nucleotides that would be
# inserted starting at that offset.
self.referenceInsertions = defaultdict(list)
def queries(self, rcSuffix='', rcNeeded=False, padChar='-',
queryInsertionChar='N', unknownQualityChar='!',
allowDuplicateIds=False, addAlignment=False):
"""
Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
            are added for duplicated ids (if there are duplicates and
            C{allowDuplicateIds} is C{False}).
@param rcNeeded: If C{True}, queries that are flagged as matching when
            reverse complemented will be reverse complemented when preparing
            the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}.
"""
referenceLength = self.referenceLength
# Hold the count for each id so we can add /1, /2 etc to duplicate
# ids (unless --allowDuplicateIds was given).
idCount = Counter()
MATCH_OPERATIONS = {CMATCH, CEQUAL, CDIFF}
for lineNumber, alignment in enumerate(
self.samFilter.alignments(), start=1):
query = alignment.query_sequence
quality = ''.join(chr(q + 33) for q in alignment.query_qualities)
if alignment.is_reverse:
if rcNeeded:
query = DNARead('id', query).reverseComplement().sequence
quality = quality[::-1]
if rcSuffix:
alignment.query_name += rcSuffix
# Adjust the query id if it's a duplicate and we're not allowing
# duplicates.
if allowDuplicateIds:
queryId = alignment.query_name
else:
count = idCount[alignment.query_name]
idCount[alignment.query_name] += 1
queryId = alignment.query_name + (
'' if count == 0 else '/%d' % count)
referenceStart = alignment.reference_start
atStart = True
queryIndex = 0
referenceIndex = referenceStart
alignedSequence = ''
alignedQuality = ''
for operation, length in alignment.cigartuples:
# The operations are tested in the order they appear in
# https://samtools.github.io/hts-specs/SAMv1.pdf It would be
# more efficient to test them in order of frequency of
# occurrence.
if operation in MATCH_OPERATIONS:
atStart = False
alignedSequence += query[queryIndex:queryIndex + length]
alignedQuality += quality[queryIndex:queryIndex + length]
elif operation == CINS:
# Insertion to the reference. This consumes query bases but
# we don't output them because the reference cannot be
# changed. I.e., these bases in the query would need to be
# inserted into the reference. Remove these bases from the
# query but record what would have been inserted into the
# reference.
atStart = False
self.referenceInsertions[queryId].append(
(referenceIndex,
query[queryIndex:queryIndex + length]))
elif operation == CDEL:
# Delete from the reference. Some bases from the reference
# would need to be deleted to continue the match. So we put
# an insertion into the query to compensate.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CREF_SKIP:
# Skipped reference. Opens a gap in the query. For
# mRNA-to-genome alignment, an N operation represents an
# intron. For other types of alignments, the
# interpretation of N is not defined. So this is unlikely
# to occur.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CSOFT_CLIP:
# Bases in the query that are not part of the match. We
# remove these from the query if they protrude before the
# start or after the end of the reference. According to the
# SAM docs, 'S' operations may only have 'H' operations
# between them and the ends of the CIGAR string.
if atStart:
# Don't set atStart=False, in case there's another 'S'
# operation.
unwantedLeft = length - referenceStart
if unwantedLeft > 0:
# The query protrudes left. Copy its right part.
alignedSequence += query[
queryIndex + unwantedLeft:queryIndex + length]
alignedQuality += quality[
queryIndex + unwantedLeft:queryIndex + length]
referenceStart = 0
else:
referenceStart -= length
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
else:
unwantedRight = (
(referenceStart + len(alignedSequence) + length) -
referenceLength)
if unwantedRight > 0:
# The query protrudes right. Copy its left part.
alignedSequence += query[
queryIndex:queryIndex + length - unwantedRight]
alignedQuality += quality[
queryIndex:queryIndex + length - unwantedRight]
else:
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
elif operation == CHARD_CLIP:
# Some bases have been completely removed from the query.
# This (H) can only be present as the first and/or last
# operation. There is nothing to do as the bases are simply
# not present in the query string in the SAM/BAM file.
pass
elif operation == CPAD:
# This is "silent deletion from the padded reference",
# which consumes neither query nor reference.
atStart = False
else:
raise ValueError('Unknown CIGAR operation:', operation)
if operation in CONSUMES_QUERY:
queryIndex += length
if operation in CONSUMES_REFERENCE:
referenceIndex += length
if queryIndex != len(query):
# Oops, we did not consume the entire query.
raise ValueError(
'Query %r not fully consumed when parsing CIGAR string. '
'Query %r (len %d), final query index %d, CIGAR: %r' %
(alignment.query_name, query, len(query), queryIndex,
alignment.cigartuples))
# We cannot test we consumed the entire reference. The CIGAR
# string applies to (and exhausts) the query but is silent
# about the part of the reference that lies to the right of the
# aligned query.
# Put gap characters before and after the aligned sequence so that
# it is offset properly and matches the length of the reference.
padRightLength = (referenceLength -
(referenceStart + len(alignedSequence)))
paddedSequence = (padChar * referenceStart +
alignedSequence +
padChar * padRightLength)
paddedQuality = (unknownQualityChar * referenceStart +
alignedQuality +
unknownQualityChar * padRightLength)
read = Read(queryId, paddedSequence, paddedQuality)
if addAlignment:
read.alignment = alignment
yield read
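# A hedged usage sketch (added for illustration; not part of the original
# module): print the gap-padded queries from a hypothetical 'example.bam'
# file as FASTA. The file name is an assumption.
def _examplePaddedSAMUsage():
    paddedSAM = PaddedSAM(SAMFilter('example.bam'))
    for read in paddedSAM.queries():
        print('>%s\n%s' % (read.id, read.sequence))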
class DistanceMatrix:
"""
Maintain score (matching) information between reads and reference sequences
as read from a SAM file (or files) and provide methods for calculating
distance between references.
"""
def __init__(self):
# self.scores is a similarity matrix between references and
# queries from a SAM file. The values are 1.0 or 0.0 when queries
# match / do not match references unless we are using alignment
# scores, as generated by tools like bowtie2 and bwa and stored in
# a SAM tag. In that case, the alignment score value is stored (see
# https://en.wikipedia.org/wiki/SAM_(file_format)#Optional_fields
# for SAM tag info).
#
# The dictionary is keyed by str reference ids and the values are
# dicts keyed by str query ids. The values of the sub-dictionaries
# are the float scores (similarities). E.g.
#
# {
# 'reference-1': {
# 'query-1': 77.5,
# 'query-2': 37.0,
# },
# 'reference-2': {
# 'query-1': 78.0,
# },
# }
#
# If a reference/query id pair is not present in self.scores, it
# means that query did not match that reference. The score method
# (below) will assign such a pair a similarity score of 0.0.
self.scores = {}
def addFile(self, filename, scoreTag=None):
"""
Add information from a SAM file.
@param filename: A C{str} SAM file name to read.
@param scoreTag: A C{str} alignment score tag to use instead of binary
match / non-match values that are used if C{scoreTag} is C{None}.
The score tag must be present in the optional SAM fields.
"""
if scoreTag:
def getScore(alignment, filename, count):
"""
Get the alignment score from a SAM alignment.
@param alignment: A samtools alignment.
@param filename: The C{str} SAM file name being read.
@param count: The C{int} line number in the SAM file.
"""
try:
score = alignment.get_tag(scoreTag)
except KeyError:
raise ValueError(
f'Alignment {count} in {filename!r} has no '
f'{scoreTag!r} score tag.')
else:
if score < 0.0:
raise ValueError(
f'Alignment {count} in {filename!r} has tag '
f'{scoreTag!r} with negative value ({score}).')
return score
else:
def getScore(*args):
"""
Return a binary score of 1.0 seeing as we know the query was
mapped and no score tag is in use.
@param args: ignored.
"""
return 1.0
scores = self.scores
with samfile(filename) as samAlignment:
for count, alignment in enumerate(samAlignment.fetch(), start=1):
if alignment.is_unmapped:
continue
queryId = alignment.query_name
referenceId = alignment.reference_name
score = getScore(alignment, filename, count)
try:
preExisting = scores[referenceId][queryId]
except KeyError:
if referenceId not in scores:
scores[referenceId] = {}
scores[referenceId][queryId] = score
else:
if score > preExisting:
scores[referenceId][queryId] = score
def score(self, referenceId, queryId):
"""
Get the score for a reference and query.
Although this method name does not start with an underscore, it's not
likely to be used directly when this class is used to calculate
distances between references according to query matching. The scores
between references and queries (returned by this function) are just
a component of the overall distance.
@param referenceId: A C{str} reference id.
@param queryId: A C{str} query id.
        @return: A C{float} score, which will be zero if there was no match
            between the query and reference, or if the reference id or query
            id has never been seen. This could arguably cause a KeyError, but
            I decided against that to keep the code simpler / faster. If you
            pass in unknown query or reference ids, that's your problem (this
            should be very unlikely because our caller is operating on a SAM
            file, so the place they're most likely to be getting query and
            reference ids from is that same SAM file).
"""
try:
return self.scores[referenceId][queryId]
except KeyError:
return 0.0
def jaccardDistance(self, referenceId1, referenceId2):
"""
Get the Jaccard distance between two references. See
https://en.wikipedia.org/wiki/Jaccard_index
@param referenceId1: A C{str} reference id.
@param referenceId2: A C{str} reference id.
@return: A C{float} distance.
"""
queryIds1 = set(self.scores.get(referenceId1, []))
queryIds2 = set(self.scores.get(referenceId2, []))
denominator = len(queryIds1 | queryIds2)
if denominator:
numerator = len(queryIds1 & queryIds2)
similarity = numerator / denominator
distance = 1.0 - similarity
return distance
else:
# The references have no matching queries in common.
return 1.0
def soergelDistance(self, referenceId1, referenceId2):
"""
Get the Soergel (i.e., weighted Jaccard) distance between two
references. See https://en.wikipedia.org/wiki/Jaccard_index
Note that this will be the same as the jaccardDistance unless a
scoreTag was used in reading the SAM file.
@param referenceId1: A C{str} reference id.
@param referenceId2: A C{str} reference id.
@return: A C{float} distance.
"""
queryIds1 = set(self.scores.get(referenceId1, []))
queryIds2 = set(self.scores.get(referenceId2, []))
score = self.score
scores = [
(score(referenceId1, queryId), score(referenceId2, queryId))
for queryId in (queryIds1 | queryIds2)]
denominator = sum(max(a, b) for a, b in scores)
if denominator:
numerator = sum(min(a, b) for a, b in scores)
similarity = numerator / denominator
distance = 1.0 - similarity
return distance
else:
# Neither reference had a positive match against any query.
return 1.0
def matrix(self, referenceIds=None, metric='soergel', similarity=False,
returnDict=False):
"""
Compute a distance matrix.
@param referenceIds: An iterable of C{str} reference ids. If C{None},
all known reference ids will be used.
@param metric: A C{str}, either 'soergel' or 'jaccard'.
@param similarity: If C{True}, return a similarity matrix.
@param returnDict: If C{True}, return a C{dict} (see @return, below).
@return: If C{returnDict} is C{False}, return a symmetric square
C{np.array} of C{float} inter-reference distances (or similarities
            if C{similarity} is C{True}) in the range [0.0, 1.0]. The order of
the entries in the rows and columns of the returned array matches
that of C{referenceIds} (if given) or the order in C{self.scores}
otherwise. If C{returnDict} is C{True}, return a C{dict} of
C{dict}s, indexed by the two reference ids, with values as above.
"""
assert metric in {'jaccard', 'soergel'}
referenceIds = tuple(referenceIds or self.scores)
nIds = len(referenceIds)
matrix = defaultdict(dict) if returnDict else np.empty((nIds, nIds))
identityValue = 1.0 if similarity else 0.0
func = (self.jaccardDistance if metric == 'jaccard' else
self.soergelDistance)
if similarity:
def metricFunc(ref1, ref2):
return 1.0 - func(ref1, ref2)
else:
def metricFunc(ref1, ref2):
return func(ref1, ref2)
for index1, ref1 in enumerate(referenceIds):
for index2, ref2 in enumerate(referenceIds):
i, j = (ref1, ref2) if returnDict else (index1, index2)
value = identityValue if i == j else metricFunc(ref1, ref2)
matrix[i][j] = matrix[j][i] = value
return matrix
def save(self, fp):
"""
Save the similarity matrix as JSON.
@param fp: An open file pointer to write to.
"""
dump(self.scores, fp, sort_keys=True, indent=4)
def load(self, fp):
"""
Load a similarity matrix from a JSON file.
@param fp: An open file pointer to read from.
"""
self.scores = load(fp)
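# A hedged usage sketch (added for illustration; not part of the original
# module): compute pairwise reference distances from a hypothetical
# 'example.sam' file, using the bowtie2/bwa 'AS' alignment score tag. The
# file name is an assumption.
def _exampleDistanceMatrixUsage():
    dm = DistanceMatrix()
    dm.addFile('example.sam', scoreTag='AS')
    print(dm.matrix(metric='soergel', returnDict=True))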
def getReferenceInfo(bam, bamFilename, bamId=None, referenceFasta=None,
fastaId=None, quiet=False):
"""
    Get the id, FASTA reference (if any), and length of a BAM file reference
    sequence.
@param bam: An open BAM file.
@param bamFilename: The C{str} file name of the BAM file.
@param bamId: A C{str} BAM file reference name indicating which aligned
reads to make a consensus from. If not given, will be inferred
from the BAM file header.
@param referenceFasta: A C{str} file name containing the sequence that was
aligned to in making the BAM file.
@param fastaId: A C{str} reference name indicating which sequence in
C{referenceFasta} to use as a reference. Only considered if
C{referenceFasta} is given. If not given and C{referenceFasta} is,
the reference id will be inferred from reference names in the BAM
header, or will be taken as the id of the first sequence in
C{referenceFasta}.
@param quiet: If C{True}, suppress diagnostic output.
@raise UnknownReference: For an unknown reference.
@raise UnspecifiedReference: If a reference is not given and cannot be
inferred.
@raise ReferenceNameMismatchError: If the names of the reference in the BAM
file and the FASTA reference file (if given) do not match (unless both
are explicitly given, in which case mismatching names are allowed as
long as the sequence lengths are the same).
@raise UnequalReferenceLengthError: If the lengths of the reference in the
        BAM file and the FASTA reference file (if given) do not match.
    @return: A 3-tuple giving the C{str} reference sequence id, the reference
        C{Read} (or C{None} if no reference FASTA was given), and the C{int}
        reference length.
"""
bamReferences = set(samReferences(bam))
checkName = checkLength = True
inferredBamId = bamId is None
inferredFastaId = fastaId is None
referenceLength = None
if bamId is None:
inferredBamId = True
else:
if bamId not in bamReferences:
raise UnknownReference(
f'BAM file {str(bamFilename)!r} does not mention a reference '
f'with id {bamId!r}. Known references are: '
f'{", ".join(sorted(bamReferences))}.')
inferredBamId = False
tid = bam.get_tid(bamId)
referenceLength = bam.lengths[tid]
if fastaId is not None:
# Both ids have been given explicitly, so don't check that they
# are identical, assume the user knows what they are doing (we
# will just check the lengths, if necessary).
checkName = False
if referenceFasta is None:
reference = None
else:
if fastaId is None:
# We're given a reference FASTA file, but no id to use. Look at
# the sequences in the FASTA and take the first one that has a
# name and length matching a BAM reference (we could actually
# look to see if there are more than one and, if so, give an
# error if their sequences are not identical).
firstRead = None
for read in FastaReads(referenceFasta):
if firstRead is None:
firstRead = read
if read.id in bamReferences:
tid = bam.get_tid(read.id)
if len(read) == bam.lengths[tid]:
reference = read
checkLength = checkName = False
break
else:
# Try the first sequence in the FASTA file.
reference = firstRead
if firstRead is None:
raise UnknownReference(
f'The FASTA reference file {str(referenceFasta)!r} '
f'contained no sequences.')
else:
for read in FastaReads(referenceFasta):
if read.id == fastaId:
reference = read
break
else:
raise UnknownReference(f'No sequence with id {fastaId!r} '
f'found in {str(referenceFasta)!r}.')
# Set reference length if it has not already been set due to a
# given bamId.
if referenceLength is None:
referenceLength = len(reference)
if bamId is None:
if len(bamReferences) == 1:
# This is the only possibility.
bamId = tuple(bamReferences)[0]
tid = bam.get_tid(bamId)
referenceLength = bam.lengths[tid]
elif reference and reference.id in bamReferences:
tid = bam.get_tid(reference.id)
if len(reference) == bam.lengths[tid]:
bamId = reference.id
checkLength = checkName = False
else:
raise UnspecifiedReference(
f'Could not infer a BAM reference. Available references are: '
f'{", ".join(sorted(bamReferences))}.')
if inferredBamId and not quiet:
print(f'BAM reference id {bamId!r} inferred from context.',
file=sys.stderr)
if reference:
if inferredFastaId and not quiet:
print(f'FASTA reference id {reference.id!r} inferred from '
f'context.', file=sys.stderr)
if checkName and reference.id != bamId:
raise ReferenceNameMismatchError(
f'Reference FASTA sequence name {reference.id!r} does '
f'not match the BAM id {bamId!r}.')
if checkLength and len(reference) != referenceLength:
raise UnequalReferenceLengthError(
f'Reference FASTA sequence {reference.id!r} has length '
f'{len(reference)}, but the BAM reference {bamId!r} has '
f'length {referenceLength}.')
return bamId, reference, referenceLength
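# A hedged usage sketch (added for illustration; not part of the original
# module): look up the reference id and length for a hypothetical
# single-reference 'example.bam' file. The file name is an assumption.
def _exampleGetReferenceInfo():
    with samfile('example.bam') as bam:
        bamId, reference, length = getReferenceInfo(bam, 'example.bam')
        print(bamId, length)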
|
acorg/dark-matter
|
dark/sam.py
|
Python
|
mit
| 48,541
|
[
"BWA",
"pysam"
] |
77a1d05680644447867155e2f38b460ca38ce84067ad054eaba085f53fe64dda
|
#!/usr/bin/env python
# GM-PHD implementation in python by Dan Stowell.
# Based on the description in Vo and Ma (2006).
# (c) 2012 Dan Stowell and Queen Mary University of London.
# All rights reserved.
#
# NOTE: I AM NOT IMPLEMENTING SPAWNING, since I don't need it.
# It would be straightforward to add it - see the original paper for how-to.
"""
This file is part of gmphd, GM-PHD filter in python by Dan Stowell.
gmphd is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gmphd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with gmphd. If not, see <http://www.gnu.org/licenses/>.
"""
simplesum = sum # we want to be able to use "pure" sum not numpy (shoulda namespaced)
from numpy import *
import numpy.linalg
from copy import deepcopy
from operator import attrgetter
myfloat = float64
class GmphdComponent:
"""Represents a single Gaussian component,
with a float weight, vector location, matrix covariance.
    Note that we don't require a GM to sum to 1, since it's not always about probability densities."""
def __init__(self, weight, loc, cov):
self.weight = myfloat(weight)
self.loc = array(loc, dtype=myfloat, ndmin=2)
self.cov = array(cov, dtype=myfloat, ndmin=2)
self.loc = reshape(self.loc, (size(self.loc), 1)) # enforce column vec
        self.cov = reshape(self.cov, (size(self.loc), size(self.loc))) # enforce a square covariance matching the dimension of loc
# precalculated values for evaluating gaussian:
k = len(self.loc)
self.dmv_part1 = (2.0 * pi) ** (-k * 0.5)
self.dmv_part2 = power(numpy.linalg.det(self.cov), -0.5)
self.invcov = numpy.linalg.inv(self.cov)
def dmvnorm(self, x):
"""Evaluate this multivariate normal component, at a location x.
NB this does NOT APPLY THE WEIGHTING, simply for API similarity to the other method with this name."""
x = array(x, dtype=myfloat)
dev = x - self.loc
part3 = exp(-0.5 * dot(dot(dev.T, self.invcov), dev))
return self.dmv_part1 * self.dmv_part2 * part3
# We don't always have a GmphdComponent object so:
def dmvnorm(loc, cov, x):
"Evaluate a multivariate normal, given a location (vector) and covariance (matrix) and a position x (vector) at which to evaluate"
loc = array(loc, dtype=myfloat)
cov = array(cov, dtype=myfloat)
x = array(x, dtype=myfloat)
k = len(loc)
part1 = (2.0 * pi) ** (-k * 0.5)
part2 = power(numpy.linalg.det(cov), -0.5)
dev = x - loc
part3 = exp(-0.5 * dot(dot(dev.T, numpy.linalg.inv(cov)), dev))
return part1 * part2 * part3
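# A hedged worked example (added for illustration; not part of the original
# module): a standard 1D normal evaluated at its mean should give
# 1/sqrt(2*pi), roughly 0.3989.
def _example_dmvnorm():
    value = dmvnorm([0.0], [[1.0]], [0.0])
    assert abs(value - 0.3989422804014327) < 1e-9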
def sampleGm(complist):
"Given a list of GmphdComponents, randomly samples a value from the density they represent"
weights = array([x.weight for x in complist])
weights = weights / simplesum(weights) # Weights aren't externally forced to sum to one
choice = random.random()
cumulative = 0.0
for i,w in enumerate(weights):
cumulative += w
if choice <= cumulative:
# Now we sample from the chosen component and return a value
comp = complist[i]
return random.multivariate_normal(comp.loc.flat, comp.cov)
raise RuntimeError("sampleGm terminated without choosing a component")
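# A hedged usage sketch (added for illustration; not part of the original
# module): draw one sample from a two-component 1D mixture. The weights and
# locations are made up.
def _example_sampleGm():
    mixture = [GmphdComponent(0.3, [0.0], [[1.0]]),
               GmphdComponent(0.7, [5.0], [[1.0]])]
    print(sampleGm(mixture))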
################################################################################
class Gmphd:
"""Represents a set of modelling parameters and the latest frame's
GMM estimate, for a GM-PHD model without spawning.
Typical usage would be, for each frame of input data, to run:
g.update(obs)
g.prune()
estimate = g.extractstates()
'gmm' is an array of GmphdComponent items which makes up
the latest GMM, and updated by the update() call.
It is initialised as empty.
    Test code example (1D data, with new targets expected at around 100):
from gmphd import *
g = Gmphd([GmphdComponent(1, [100], [[10]])], 0.9, 0.9, [[1]], [[1]], [[1]], [[1]], 0.000002)
g.update([[30], [67.5]])
g.gmmplot1d()
g.prune()
g.gmmplot1d()
g.gmm
[(float(comp.loc), comp.weight) for comp in g.gmm]
"""
def __init__(self, birthgmm, survival, detection, f, q, h, r, clutter):
"""
'birthgmm' is an array of GmphdComponent items which makes up
the GMM of birth probabilities.
'survival' is survival probability.
'detection' is detection probability.
'f' is state transition matrix F.
'q' is the process noise covariance Q.
'h' is the observation matrix H.
'r' is the observation noise covariance R.
'clutter' is the clutter intensity.
"""
self.gmm = [] # empty - things will need to be born before we observe them
self.birthgmm = birthgmm
self.survival = myfloat(survival) # p_{s,k}(x) in paper
self.detection = myfloat(detection) # p_{d,k}(x) in paper
self.f = array(f, dtype=myfloat) # state transition matrix (F_k-1 in paper)
self.q = array(q, dtype=myfloat) # process noise covariance (Q_k-1 in paper)
self.h = array(h, dtype=myfloat) # observation matrix (H_k in paper)
self.r = array(r, dtype=myfloat) # observation noise covariance (R_k in paper)
self.clutter = myfloat(clutter) # clutter intensity (KAU in paper)
def update(self, obs):
"""Run a single GM-PHD step given a new frame of observations.
'obs' is an array (a set) of this frame's observations.
Based on Table 1 from Vo and Ma paper."""
#######################################
# Step 1 - prediction for birth targets
born = [deepcopy(comp) for comp in self.birthgmm]
# The original paper would do a spawning iteration as part of Step 1.
spawned = [] # not implemented
#######################################
# Step 2 - prediction for existing targets
updated = [GmphdComponent( \
self.survival * comp.weight, \
dot(self.f, comp.loc), \
self.q + dot(dot(self.f, comp.cov), self.f.T) \
) for comp in self.gmm]
predicted = born + spawned + updated
#######################################
# Step 3 - construction of PHD update components
# These two are the mean and covariance of the expected observation
nu = [dot(self.h, comp.loc) for comp in predicted]
s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]
        # These are the Kalman gain (k) and the updated (posterior) covariance (pkk).
k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))
for index, comp in enumerate(predicted)]
pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)
for index, comp in enumerate(predicted)]
#######################################
# Step 4 - update using observations
# The 'predicted' components are kept, with a decay
newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov) for comp in predicted]
# then more components are added caused by each obsn's interaction with existing component
for anobs in obs:
anobs = array(anobs)
newgmmpartial = []
for j, comp in enumerate(predicted):
newgmmpartial.append(GmphdComponent( \
self.detection * comp.weight \
* dmvnorm(nu[j], s[j], anobs), \
comp.loc + dot(k[j], anobs - nu[j]), \
comp.cov \
))
# The Kappa thing (clutter and reweight)
weightsum = simplesum(newcomp.weight for newcomp in newgmmpartial)
reweighter = 1.0 / (self.clutter + weightsum)
for newcomp in newgmmpartial:
newcomp.weight *= reweighter
newgmm.extend(newgmmpartial)
self.gmm = newgmm
def prune(self, truncthresh=1e-6, mergethresh=0.01, maxcomponents=100):
"""Prune the GMM. Alters model state.
Based on Table 2 from Vo and Ma paper."""
# Truncation is easy
weightsums = [simplesum(comp.weight for comp in self.gmm)] # diagnostic
sourcegmm = [comp for comp in self.gmm if comp.weight > truncthresh]
weightsums.append(simplesum(comp.weight for comp in sourcegmm))
origlen = len(self.gmm)
trunclen = len(sourcegmm)
# Iterate to build the new GMM
newgmm = []
while len(sourcegmm) > 0:
# find weightiest old component and pull it out
            windex = argmax([comp.weight for comp in sourcegmm])
weightiest = sourcegmm[windex]
sourcegmm = sourcegmm[:windex] + sourcegmm[windex+1:]
# find all nearby ones and pull them out
distances = [float(dot(dot((comp.loc - weightiest.loc).T, comp.invcov), comp.loc - weightiest.loc)) for comp in sourcegmm]
dosubsume = array([dist <= mergethresh for dist in distances])
subsumed = [weightiest]
if any(dosubsume):
#print "Subsuming the following locations into weightest with loc %s and weight %g (cov %s):" \
# % (','.join([str(x) for x in weightiest.loc.flat]), weightiest.weight, ','.join([str(x) for x in weightiest.cov.flat]))
#print list([comp.loc[0][0] for comp in list(array(sourcegmm)[ dosubsume]) ])
subsumed.extend( list(array(sourcegmm)[ dosubsume]) )
sourcegmm = list(array(sourcegmm)[~dosubsume])
# create unified new component from subsumed ones
aggweight = simplesum(comp.weight for comp in subsumed)
newcomp = GmphdComponent( \
aggweight,
sum(array([comp.weight * comp.loc for comp in subsumed]), 0) / aggweight,
sum(array([comp.weight * (comp.cov + (weightiest.loc - comp.loc) \
* (weightiest.loc - comp.loc).T) for comp in subsumed]), 0) / aggweight
)
newgmm.append(newcomp)
# Now ensure the number of components is within the limit, keeping the weightiest
newgmm.sort(key=attrgetter('weight'))
newgmm.reverse()
self.gmm = newgmm[:maxcomponents]
weightsums.append(simplesum(comp.weight for comp in newgmm))
weightsums.append(simplesum(comp.weight for comp in self.gmm))
print("prune(): %i -> %i -> %i -> %i" % (origlen, trunclen, len(newgmm), len(self.gmm)))
print("prune(): weightsums %g -> %g -> %g -> %g" % (weightsums[0], weightsums[1], weightsums[2], weightsums[3]))
# pruning should not alter the total weightsum (which relates to total num items) - so we renormalise
weightnorm = weightsums[0] / weightsums[3]
for comp in self.gmm:
comp.weight *= weightnorm
def extractstates(self, bias=1.0):
"""Extract the multiple-target states from the GMM.
Returns a list of target states; doesn't alter model state.
Based on Table 3 from Vo and Ma paper.
I added the 'bias' factor, by analogy with the other method below."""
items = []
print("weights:")
print([round(comp.weight, 7) for comp in self.gmm])
for comp in self.gmm:
val = comp.weight * float(bias)
if val > 0.5:
for _ in range(int(round(val))):
items.append(deepcopy(comp.loc))
for x in items: print(x.T)
return items
def extractstatesusingintegral(self, bias=1.0):
"""Extract states based on the expected number of states from the integral of the intensity.
This is NOT in the GMPHD paper; added by Dan.
"bias" is a multiplier for the est number of items.
"""
numtoadd = int(round(float(bias) * simplesum(comp.weight for comp in self.gmm)))
print("bias is %g, numtoadd is %i" % (bias, numtoadd))
items = []
# A temporary list of peaks which will gradually be decimated as we steal from its highest peaks
peaks = [{'loc':comp.loc, 'weight':comp.weight} for comp in self.gmm]
while numtoadd > 0:
windex = 0
wsize = 0
for which, peak in enumerate(peaks):
if peak['weight'] > wsize:
windex = which
wsize = peak['weight']
# add the winner
items.append(deepcopy(peaks[windex]['loc']))
peaks[windex]['weight'] -= 1.0
numtoadd -= 1
for x in items: print(x.T)
return items
########################################################################################
def gmmeval(self, points, onlydims=None):
"""Evaluates the GMM at a supplied list of points (full dimensionality).
'onlydims' if not nil, marginalises out (well, ignores) the nonlisted dims. All dims must still be listed in the points, so put zeroes in."""
return [ \
simplesum(comp.weight * comp.dmvnorm(p) for comp in self.gmm) \
for p in points]
def gmmeval1d(self, points, whichdim=0):
"Evaluates the GMM at a supplied list of points (1D only)"
return [ \
simplesum(comp.weight * dmvnorm([comp.loc[whichdim]], [[comp.cov[whichdim][whichdim]]], p) for comp in self.gmm) \
for p in points]
def gmmevalgrid1d(self, span=None, gridsize=200, whichdim=0):
"Evaluates the GMM on a uniformly-space grid of points (1D only)"
        if span is None:
locs = array([comp.loc[whichdim] for comp in self.gmm])
span = (min(locs), max(locs))
grid = (arange(gridsize, dtype=float) / (gridsize-1)) * (span[1] - span[0]) + span[0]
return self.gmmeval1d(grid, whichdim)
def gmmevalalongline(self, span=None, gridsize=200, onlydims=None):
"""Evaluates the GMM on a uniformly-spaced line of points (i.e. a 1D line, though can be angled).
'span' must be a list of (min, max) for each dimension, over which the line will iterate.
'onlydims' if not nil, marginalises out (well, ignores) the nonlisted dims. All dims must still be listed in the spans, so put zeroes in."""
        if span is None:
            # note transpose - locs is not a list of locations but a list of dimensions
            locs = array([comp.loc.flatten() for comp in self.gmm]).T
            # span is an array of (min, max) for each dim
            span = array([[min(dim), max(dim)] for dim in locs])
        else:
            span = array(span)
        steps = (arange(gridsize, dtype=float) / (gridsize-1))
        # transpose back to a list of state-space points
        grid = array([steps * (aspan[1] - aspan[0]) + aspan[0] for aspan in span]).T
return self.gmmeval(grid, onlydims)
    def gmmplot1d(self, gridsize=200, span=None, whichdim=0):
        "Plots the GMM. Only works for 1D model."
        import matplotlib.pyplot as plt
        if span is None:
            locs = array([comp.loc[whichdim] for comp in self.gmm])
            span = (min(locs), max(locs))
        grid = (arange(gridsize, dtype=float) / (gridsize-1)) * (span[1] - span[0]) + span[0]
        vals = array(self.gmmeval1d(grid, whichdim)).flatten()
        fig = plt.figure()
        plt.plot(grid, vals, '-')
        fig.show()
        return fig
|
danstowell/gmphd
|
gmphd.py
|
Python
|
gpl-3.0
| 14,138
|
[
"Gaussian"
] |
a191f6c30b0122b01b04a97738240df416e236367a4d660bf40b4c9216679cc0
|
import ast
import astunparse
import inspect
from collections import OrderedDict
from functools import update_wrapper
from types import FunctionType, MethodType
from .exceptions import ArgumentError, RewritingError
from .matching import Var, push_context, matches, var
undefined = object()
class generator(object):
def __init__(self, fn):
self._fn = fn
# Get the codomain of the generator from its annotations.
annotations = dict(fn.__annotations__)
try:
self.codomain = annotations.pop('return')
except KeyError:
raise SyntaxError('undefined codomain for %s()' % fn.__qualname__)
# Get the domain of the generator from its annotations.
parameters = inspect.signature(fn).parameters
self.domain = OrderedDict([(name, annotations[name]) for name in parameters])
def __get__(self, instance, owner=None):
if instance is None:
return self
new_fn = self._fn.__get__(instance, owner)
return self.__class__(new_fn)
def __call__(self, *args, **kwargs):
rv = self.codomain()
rv._generator = self
        # If the domain of the generator is empty, make sure no arguments
        # were passed to the function.
if len(self.domain) == 0:
if (len(args) > 0) or (len(kwargs) > 0):
raise ArgumentError('%s() takes no arguments' % self._fn.__qualname__)
return rv
# Allow to call generators with a single positional argument.
if len(args) > 1:
raise ArgumentError('use of multiple positional arguments is forbidden')
if (len(self.domain) == 1) and (len(args) == 1):
kwargs.update([(list(self.domain.keys())[0], args[0])])
# Look for the generator arguments.
rv._generator_args = {}
missing = []
for name, sort in self.domain.items():
try:
value = kwargs[name]
except KeyError:
missing.append(name)
continue
if not (isinstance(value, Var) or isinstance(value, sort)):
raise ArgumentError(
"'%s' should be a variable or a term of sort '%s'" %
(name, sort.__sortname__))
rv._generator_args[name] = kwargs[name]
if len(missing) > 0:
raise ArgumentError(
'%s() missing argument(s): %s' % (self._fn.__qualname__, ', '.join(missing)))
return rv
def __str__(self):
domain = ', '.join(
'%s:%s' % (name, sort.__sortname__) for name, sort in self.domain.items())
return '(%s) -> %s' % (domain, self.codomain.__sortname__)
class attr_constructor(generator):
def __call__(self, *args, **kwargs):
return self.codomain(*args, **kwargs)
class operation(generator):
def __init__(self, fn):
super().__init__(fn)
if not hasattr(fn, '_original'):
self._rewrite_fn(fn)
def __get__(self, instance, owner=None):
if instance is None:
return self
new_mtd = MethodType(self._prepare_fn(), instance)
return self.__class__(new_mtd)
def __call__(self, *args, **kwargs):
# TODO Type checking
if inspect.ismethod(self._fn):
fn = self._fn
else:
fn = self._prepare_fn()
try:
rv = fn(*args, **kwargs)
except Exception as e:
# Inspect where the original function was defined so we can raise
# a more helpful exception.
source_file = inspect.getsourcefile(self._fn._original)
source_line = inspect.getsourcelines(self._fn._original)[1]
raise RewritingError(
'%(file)s, in %(fn)s (line %(line)s)\n%(error)s: %(message)s' % {
'file': source_file,
'line': source_line,
'fn': self._fn.__qualname__,
'error': e.__class__.__name__,
'message': str(e)
}) from e
if rv is None:
raise RewritingError('failed to apply %s()' % self._fn.__qualname__)
return rv
def _rewrite_fn(self, fn):
# Rewrite the operation so that its if statements are wrapped within a
# matching context.
node = ast.parse(_unindent(inspect.getsource(fn)))
node = _RewriteOperation().visit(node)
src = astunparse.unparse(node)
exec(compile(src, filename='', mode='exec'))
self._fn = locals()['_fn']
update_wrapper(self._fn, fn)
self._fn.__qualname__ = fn.__qualname__
self._fn._original = fn
self._fn._nonlocals = inspect.getclosurevars(self._fn._original).nonlocals
def _prepare_fn(self):
# Inject push_context into the function scope.
fn_globals = dict(self._fn._original.__globals__)
fn_globals['push_context'] = push_context
# Inject non-local variables of the original function into the
# function scope.
fn_globals.update(self._fn._nonlocals)
f = FunctionType(self._fn.__code__, fn_globals)
return update_wrapper(f, self._fn)
class Attribute(object):
def __init__(self, domain, default=None):
self.domain = domain
self.default = default
class SortBase(type):
recursive_reference = object()
@classmethod
def __prepare__(metacls, name, bases, **kwargs):
        # To handle recursive references in sort definitions, we need to
        # resolve lazily the class that functions using such references
        # refer to. However, since sorts can be dynamically specialized, we
        # can't simply inject the reference to the newly created class just
        # after we build it in the metaclass, otherwise inherited classes
        # would wrongly reference their parent.
        # To tackle this issue, we set those recursive references to
        # `SortBase.recursive_reference`, so that we can later compute the
        # class they should refer to.
return {name: SortBase.recursive_reference}
def __new__(cls, classname, bases, attrs):
# Register class attributes, generators and operations
sort_attributes = []
sort_generators = []
sort_operations = []
for name, attr in attrs.items():
if isinstance(attr, Attribute):
sort_attributes.append(name)
if isinstance(attr, generator):
if isinstance(attr, operation):
sort_operations.append(name)
else:
sort_generators.append(name)
attrs['__attributes__'] = tuple(sort_attributes)
attrs['__generators__'] = tuple(sort_generators)
attrs['__operations__'] = tuple(sort_operations)
# If the sort has attributes, create an attribute constructor and an
# attribute accessor for each attribute.
if sort_attributes:
parameters = ', '.join(
'%s: %s' % (name, attrs[name].domain.__name__) for name in sort_attributes)
src = 'def __constructor__(%s) -> %s: pass' % (parameters, classname)
scope = {attrs[name].domain.__name__: attrs[name].domain for name in sort_attributes}
scope[classname] = SortBase.recursive_reference
eval_locals = {}
eval(compile(src, filename='<null>', mode='exec'), scope, eval_locals)
attrs['__attr_constructor__'] = attr_constructor(eval_locals['__constructor__'])
for attribute_name in sort_attributes:
@operation
def accessor(term: SortBase.recursive_reference) -> attrs[attribute_name].domain:
# Note that as we can't have access to the sort class yet,
# it is simpler to defer the definition of its accessors
# semantics until after we'll have created it.
pass
attrs['__get_%s__' % attribute_name] = accessor
# Give a default __sortname__ if none was specified.
if '__sortname__' not in attrs:
attrs['__sortname__'] = classname
# Create the sort class.
new_sort = type.__new__(cls, classname, bases, attrs)
# Define the semantics of the sort accessors.
for attribute_name in sort_attributes:
def accessor(term: new_sort) -> attrs[attribute_name].domain:
with push_context():
args = {name: getattr(var, name) for name in sort_attributes}
if term == new_sort.__attr_constructor__(**args):
return getattr(var, attribute_name)
attr = getattr(new_sort, '__get_%s__' % attribute_name)
attr._rewrite_fn(accessor)
attr._fn.__name__ = '__get_%s__' % attribute_name
# Resolve recursive references in the sort generators and operations.
rr = SortBase.recursive_reference
for name, attr in attrs.items():
if isinstance(attr, generator):
attr.domain.update({
name: new_sort for name, sort in attr.domain.items() if sort is rr})
if attr.codomain is rr:
attr.codomain = new_sort
return new_sort
class Sort(metaclass=SortBase):
def __init__(self, *args, **kwargs):
self._generator = None
self._generator_args = None
# Allow to call generators with a single positional argument.
if len(args) > 1:
raise ArgumentError('Use of multiple positional arguments is forbidden.')
if (len(self.__attributes__) == 1) and (len(args) == 1):
kwargs.update({self.__attributes__[0]: args[0]})
        # Initialize the instance attributes.
missing = []
for name in self.__attributes__:
attribute = getattr(self.__class__, name)
value = kwargs.get(name, attribute.default)
if value is None:
missing.append(name)
continue
if not (isinstance(value, Var) or isinstance(value, attribute.domain)):
raise ArgumentError(
"'%s' should be a variable or a term of sort '%s'." %
(name, attribute.domain.__sortname__))
setattr(self, name, value)
if len(missing) > 0:
raise ArgumentError(
'%s() missing argument(s): %s' % (self.__class__.__qualname__, ', '.join(missing)))
@property
def _is_a_constant(self):
return self._generator is not None
def where(self, **kwargs):
return self.__class__(
**{name: kwargs.get(name, getattr(self, name)) for name in self.__attributes__})
@classmethod
def specialize(cls, sortname=None, **implementations):
abstract_names = sorted(implementations.keys())
sortname = sortname or (
cls.__name__ + '_specialized_with_' +
'_'.join(implementations[n].__sortname__ for n in abstract_names))
specialization_dict = dict(cls.__dict__)
specialization_dict['__sortname__'] = sortname
for name in abstract_names:
specialization_dict[name] = implementations[name]
return SortBase(sortname, (cls,), specialization_dict)
def __hash__(self):
if self._is_a_constant:
if self._generator_args is None:
return hash(self._generator)
return hash(
(self._generator, ) +
tuple((name, term) for name, term in self._generator_args.items()))
return hash(tuple((name, getattr(self, name)) for name in self.__attributes__))
def __eq__(self, other):
return matches(self, other)
def equiv(self, other):
if isinstance(other, Var):
return True
if self._is_a_constant:
if (self._generator != other._generator):
return False
if self._generator_args is None:
return True
keys = self._generator_args.keys()
return all(self._generator_args[n] == other._generator_args[n] for n in keys)
return all(getattr(self, name) == getattr(other, name) for name in self.__attributes__)
def __str__(self):
if self._is_a_constant:
if self._generator_args is None:
return self._generator._fn.__qualname__
else:
args = ['%s: %s' % (name, term) for name, term in self._generator_args.items()]
args = ', '.join(args)
return self._generator._fn.__qualname__ + '(' + args + ')'
else:
if len(self.__attributes__) == 0:
return self.__class__.__qualname__
else:
args = ['%s = %s' % (name, getattr(self, name)) for name in self.__attributes__]
args = ', '.join(args)
return self.__class__.__qualname__ + '(' + args + ')'
def __repr__(self):
return repr(str(self))
def _unindent(src):
indentation = len(src) - len(src.lstrip())
return '\n'.join([line[indentation:] for line in src.split('\n')])
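# A hedged worked example (added for illustration; not part of the original
# module): _unindent strips the first line's indentation from every line.
def _example_unindent():
    src = '    def f():\n        return 1\n'
    assert _unindent(src) == 'def f():\n    return 1\n'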
class _RewriteOperation(ast.NodeTransformer):
_push_context_call = ast.parse('push_context()').body[0].value
def visit_FunctionDef(self, node):
# We have to rename the function so we're sure its name won't collide
# with a local variable of operation.__init__. We also have to remove
# its annotations, so that we don't need to import the sorts of its
# domain and codomain when we'll recompile it. Finally, we have to
# remove the function decorators so they don't get executed twice.
return self._update(
node,
name='_fn',
args=self._update(
node.args,
args=[self._update(arg, annotation=None) for arg in node.args.args]),
body=[self.visit(child) for child in node.body],
returns=None,
decorator_list=[])
def visit_Return(self, node):
if type(node.value) == ast.IfExp:
return self._wrap(node)
return node
def visit_If(self, node):
return self._wrap(ast.If(
test=node.test,
body=node.body,
orelse=[self.visit(child) for child in node.orelse]))
def _wrap(self, node):
return ast.With(
items=[ast.withitem(context_expr=self._push_context_call, optional_vars=None)],
body=[node])
def _update(self, node, **kwargs):
return type(node)(**{name: kwargs.get(name, getattr(node, name)) for name in node._fields})
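# A hedged usage sketch (added for illustration; not part of the original
# module): a minimal sort with two generators, showing how recursive
# references to the sort name resolve inside the class body (via
# SortBase.__prepare__ above). The sort and generator names are made up.
def _example_sort_usage():
    class _Nat(Sort):
        @generator
        def zero() -> _Nat: pass
        @generator
        def suc(n: _Nat) -> _Nat: pass
    one = _Nat.suc(_Nat.zero())
    return one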
|
kyouko-taiga/stew
|
stew/core.py
|
Python
|
apache-2.0
| 14,774
|
[
"VisIt"
] |
082e5fd2ad1dfae32903aabc51b0139cc1660d90ecb10445b4e22865b7ac1b09
|
import numpy as np
from mmd.molecule import Molecule
from mmd.realtime import RealTime
from mmd.utils.spectrum import genSpectra
hydrogen = """
0 1
H 0.0 0.0 0.0
H 0.0 0.0 0.74
"""
# init molecule and build integrals
mol = Molecule(geometry=hydrogen,basis='3-21G')
# do the SCF
mol.RHF()
# define the applied field envelope as a function of time
# here, it is a narrow gaussian envelope centered at t = 0.
def gaussian(t):
return np.exp(-50*(t**2))
# create realtime object, setting parameters and pulse envelopes
rt = RealTime(mol,numsteps=1000,stepsize=0.05,field=0.0001,pulse=gaussian)
# propagate with Magnus2
rt.Magnus2(direction='z')
m2 = rt.dipole
# propagate with Magnus4
rt.Magnus4(direction='z')
m4 = rt.dipole
try:
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
ax1.plot(rt.time,m2,label='Magnus2',color='tab:blue')
ax1.plot(rt.time,m4,label='Magnus4',color='tab:green')
ax1.set_ylabel('z-dipole / Debye',color='tab:blue')
ax1.tick_params(axis='y',labelcolor='tab:blue')
ax1.set_xlabel('Time / au')
ax1.legend(loc=1)
# plot field on separate axis
ax2 = ax1.twinx()
ax2.plot(rt.time,np.asarray(rt.shape)*rt.field,label='Applied field',color='tab:orange')
ax2.set_ylabel('Applied field / au',color='tab:orange')
ax2.tick_params(axis='y',labelcolor='tab:orange')
ax2.legend(loc=2)
fig.tight_layout()
plt.show()
plt.close()
# now plot the absorption spectra S(w) (z-component)
freq, spectra = genSpectra(rt.time,m2,np.asarray(rt.shape)*rt.field)
plt.plot(27.2114*freq,spectra)
plt.xlabel('Energy / eV')
    plt.ylabel(r'$\sigma(\omega)$ / arb. units')
plt.show()
except ImportError:
print('You need matplotlib to plot the time-evolving dipole')
|
jjgoings/McMurchie-Davidson
|
examples/real-time.py
|
Python
|
bsd-3-clause
| 1,780
|
[
"Gaussian"
] |
b54aa1fa71de3cab8b26c05beec2c2f152da2456dcf3fc9bc2165c82479434be
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: IntrinsicDistance
=========================
"""
from __future__ import print_function
from . import Observable
class IntrinsicDistance(Observable):
"""Initialize the intrinsic distance calculation.
:param PYTIM interface: compute the intrinsic distance with respect
to this interface
:param str symmetry: force calculation using this symmetry, if
                         available (e.g. 'generic', 'planar', 'spherical')
If 'default', uses the symmetry selected in
the PYTIM interface instance.
Example:
>>> import MDAnalysis as mda
>>> import pytim
>>> import numpy as np
>>> from pytim import observables
>>> from pytim.datafiles import MICELLE_PDB
>>> u = mda.Universe(MICELLE_PDB)
>>> micelle = u.select_atoms('resname DPC')
>>> waterox = u.select_atoms('type O and resname SOL')
>>> inter = pytim.GITIM(u,group=micelle, molecular=False, alpha=2.0)
>>> dist = observables.IntrinsicDistance(interface=inter)
>>> d = dist.compute(waterox)
>>> np.set_printoptions(precision=3,threshold=10)
>>> print(d)
[25.733 8.579 8.852 ... 18.566 13.709 9.876]
>>> np.set_printoptions(precision=None,threshold=None)
"""
def __init__(self, interface, symmetry='default', mode='default'):
Observable.__init__(self, interface.universe)
self.interface = interface
self.mode = mode
if symmetry == 'default':
self.symmetry = self.interface.symmetry
else:
self.symmetry = symmetry
def compute(self, inp, kargs=None):
"""Compute the intrinsic distance of a set of points from the first
layers.
        :param ndarray inp: compute the intrinsic distance for this set
of points
"""
# see pytim/surface.py
return self.interface._surfaces[0].distance(inp, self.symmetry, mode=self.mode)
|
Marcello-Sega/pytim
|
pytim/observables/intrinsic_distance.py
|
Python
|
gpl-3.0
| 2,139
|
[
"MDAnalysis"
] |
db9395528bc2fe3e437522ee6501a10e193d4a37514f0d5ecd9696e88ecf3784
|
#
# Brian C. Lane <[email protected]>
#
# Copyright 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import KickstartCommand
from pykickstart.options import KSOptionParser
class RHEL6_UnsupportedHardware(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.unsupported_hardware = kwargs.get("unsupported_hardware", False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.unsupported_hardware:
retval += "unsupported_hardware\n"
return retval
def _getParser(self):
op = KSOptionParser()
return op
def parse(self, args):
self.op.parse_args(args=args, lineno=self.lineno)
self.unsupported_hardware = True
return self
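# --- illustrative usage sketch (editor's addition, not part of pykickstart) --
# A minimal check of the command above, assuming it can be instantiated on its
# own outside a full kickstart handler: parsing the bare command line sets the
# flag, and __str__ writes it back out.
if __name__ == "__main__":
    cmd = RHEL6_UnsupportedHardware()
    cmd.lineno = 1
    cmd.parse([])
    assert cmd.unsupported_hardware
    print(str(cmd))   # expected to end with "unsupported_hardware\n"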
|
jikortus/pykickstart
|
pykickstart/commands/unsupported_hardware.py
|
Python
|
gpl-2.0
| 1,844
|
[
"Brian"
] |
6ba1ebe5128f8e94e9aebf93b045066ad07d92a0c2153c6c6f221b0b38d171b6
|
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Module for the Gaussian-Wishart and similar distributions.
"""
import numpy as np
from scipy import special
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .gaussian import GaussianMoments
from .gamma import GammaMoments
from .wishart import (WishartMoments,
WishartPriorMoments)
from .node import (Moments,
ensureparents)
from bayespy.utils import random
from bayespy.utils import utils
from bayespy.utils import linalg
class GaussianGammaISOMoments(Moments):
"""
Class for the moments of Gaussian-gamma-ISO variables.
"""
def compute_fixed_moments(self, x, alpha):
"""
Compute the moments for a fixed value
`x` is a mean vector.
`alpha` is a precision scale
"""
x = np.asanyarray(x)
alpha = np.asanyarray(alpha)
u0 = np.einsum('...,...i->...i', alpha, x)
u1 = np.einsum('...,...i,...j->...ij', alpha, x, x)
u2 = np.copy(alpha)
u3 = np.log(alpha)
u = [u0, u1, u2, u3]
return u
def compute_dims_from_values(self, x, alpha):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
D = np.shape(x)[-1]
return ( (D,), (D,D), (), () )
class GaussianGammaARDMoments(Moments):
"""
Class for the moments of Gaussian-gamma-ARD variables.
"""
def compute_fixed_moments(self, x, alpha):
"""
Compute the moments for a fixed value
`x` is a mean vector.
`alpha` is a precision scale
"""
x = np.asanyarray(x)
alpha = np.asanyarray(alpha)
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
if np.ndim(alpha) < 1:
raise ValueError("ARD scales must be a vector")
if np.shape(x)[-1] != np.shape(alpha)[-1]:
raise ValueError("Mean and ARD scales have inconsistent shapes")
u0 = np.einsum('...i,...i->...i', alpha, x)
u1 = np.einsum('...k,...k,...k->...k', alpha, x, x)
u2 = np.copy(alpha)
u3 = np.log(alpha)
u = [u0, u1, u2, u3]
return u
def compute_dims_from_values(self, x, alpha):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
if np.ndim(alpha) < 1:
raise ValueError("ARD scales must be a vector")
D = np.shape(x)[-1]
if np.shape(alpha)[-1] != D:
raise ValueError("Mean and ARD scales have inconsistent shapes")
return ( (D,), (D,), (D,), (D,) )
class GaussianWishartMoments(Moments):
"""
Class for the moments of Gaussian-Wishart variables.
"""
def compute_fixed_moments(self, x, Lambda):
"""
Compute the moments for a fixed value
`x` is a vector.
`Lambda` is a precision matrix
"""
x = np.asanyarray(x)
Lambda = np.asanyarray(Lambda)
u0 = np.einsum('...ik,...k->...i', Lambda, x)
u1 = np.einsum('...i,...ij,...j->...', x, Lambda, x)
u2 = np.copy(Lambda)
u3 = linalg.logdet_cov(Lambda)
return [u0, u1, u2, u3]
def compute_dims_from_values(self, x, Lambda):
"""
Return the shape of the moments for a fixed value.
"""
if np.ndim(x) < 1:
raise ValueError("Mean must be a vector")
if np.ndim(Lambda) < 2:
raise ValueError("Precision must be a matrix")
D = np.shape(x)[-1]
if np.shape(Lambda)[-2:] != (D,D):
raise ValueError("Mean vector and precision matrix have "
"inconsistent shapes")
return ( (D,), (), (D,D), () )
class GaussianGammaISODistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of Gaussian-Gamma-ISO variables.
"""
def compute_message_to_parent(self, parent, index, u, u_mu_Lambda, u_a, u_b):
"""
Compute the message to a parent node.
"""
if index == 0:
raise NotImplementedError()
elif index == 1:
raise NotImplementedError()
elif index == 2:
raise NotImplementedError()
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu_Lambda, u_a, u_b, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
raise NotImplementedError()
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
raise NotImplementedError()
return (u, g)
def compute_cgf_from_parents(self, u_mu_Lambda, u_a, u_b):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
raise NotImplementedError()
return g
def compute_fixed_moments_and_f(self, x, alpha, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
raise NotImplementedError()
return (u, f)
class GaussianWishartDistribution(ExponentialFamilyDistribution):
"""
Class for the VMP formulas of Gaussian-Wishart variables.
"""
def compute_message_to_parent(self, parent, index, u, u_mu, u_alpha, u_V, u_n):
"""
Compute the message to a parent node.
"""
if index == 0:
raise NotImplementedError()
elif index == 1:
raise NotImplementedError()
elif index == 2:
raise NotImplementedError()
elif index == 3:
raise NotImplementedError()
else:
raise ValueError("Index out of bounds")
def compute_phi_from_parents(self, u_mu, u_alpha, u_V, u_n, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
raise NotImplementedError()
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
raise NotImplementedError()
return (u, g)
def compute_cgf_from_parents(self, u_mu, u_alpha, u_V, u_n):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
raise NotImplementedError()
return g
def compute_fixed_moments_and_f(self, x, Lambda, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
raise NotImplementedError()
return (u, f)
class GaussianWishart(ExponentialFamily):
"""
Node for Gaussian-Wishart random variables.
The prior:
.. math::
p(x, \Lambda| \mu, \alpha, V, n)
        p(x|\Lambda, \mu, \alpha) = \mathcal{N}(x | \mu, \alpha^{-1} \Lambda^{-1})
        p(\Lambda|V, n) = \mathcal{W}(\Lambda | n, V)
The posterior approximation :math:`q(x, \Lambda)` has the same Gaussian-Wishart form.
"""
_moments = GaussianWishartMoments()
    _parent_moments = (GaussianGammaISOMoments(),
                       GammaMoments(),
                       WishartMoments(),
                       WishartPriorMoments())
_distribution = GaussianWishartDistribution()
@classmethod
@ensureparents
def _constructor(cls, mu, alpha, V, n, plates_lambda=None, plates_x=None, **kwargs):
"""
Constructs distribution and moments objects.
This method is called if useconstructor decorator is used for __init__.
`mu` is the mean/location vector
`alpha` is the scale
`V` is the scale matrix
`n` is the degrees of freedom
"""
D = mu.dims[0][0]
# Check shapes
if mu.dims != ( (D,), (D,D), (), () ):
raise ValueError("Mean vector has wrong shape")
if alpha.dims != ( (), () ):
raise ValueError("Scale has wrong shape")
if V.dims != ( (D,D), () ):
raise ValueError("Precision matrix has wrong shape")
if n.dims != ( (), () ):
raise ValueError("Degrees of freedom has wrong shape")
dims = ( (D,), (), (D,D), () )
return (dims,
kwargs,
cls._total_plates(kwargs.get('plates'),
cls._distribution.plates_from_parent(0, mu.plates),
cls._distribution.plates_from_parent(1, alpha.plates),
cls._distribution.plates_from_parent(2, V.plates),
cls._distribution.plates_from_parent(3, n.plates)),
cls._distribution,
cls._moments,
cls._parent_moments)
def random(self):
"""
Draw a random sample from the distribution.
"""
raise NotImplementedError()
def show(self):
"""
Print the distribution using standard parameterization.
"""
raise NotImplementedError()
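# --- illustrative check (editor's addition, not part of BayesPy) -------------
# A small sketch exercising the fixed-moment helpers defined above with plain
# NumPy inputs, assuming bayespy.utils.linalg is importable (see the imports).
# The expected relations follow directly from the einsum calls:
#   u0 = Lambda x,  u1 = x^T Lambda x,  u2 = Lambda,  u3 = log|Lambda|.
if __name__ == '__main__':
    x = np.array([1.0, 2.0])
    Lambda = np.array([[2.0, 0.5],
                       [0.5, 3.0]])
    moments = GaussianWishartMoments()
    u = moments.compute_fixed_moments(x, Lambda)
    print(u[0])   # equals Lambda @ x
    print(u[1])   # equals x @ Lambda @ x
    print(moments.compute_dims_from_values(x, Lambda))   # ((2,), (), (2, 2), ())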
|
nipunreddevil/bayespy
|
bayespy/inference/vmp/nodes/gaussian_wishart.py
|
Python
|
gpl-3.0
| 10,240
|
[
"Gaussian"
] |
6ca6896295685f4a365a9eb296596a332db1adb8fa82db504138810e28ab8713
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/k-means.py
# Example of using K-Means implementation
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace. We will
# also need the random module:
from numpy import *
import random
import peach as p
# In this tutorial, we reproduce the behaviour we seen in the self-organizing
# maps tutorial (please, refer to that tutorial for more information). The
# K-Means algorithm has the ability to find the clusters that partition a given
# set of points. This tutorial shows graphically how this happens. We have a set
# of points in the cartesian plane, each coordinate obtained from a central
# point plus a random (gaussian, average 0, small variance) shift in some
# direction.
# First, we create the training set:
train_size = 300
centers = [ array([ 1.0, 0.0 ], dtype=float),
array([ 1.0, 1.0 ], dtype=float),
array([ 0.0, 1.0 ], dtype=float),
array([-1.0, 1.0 ], dtype=float),
array([-1.0, 0.0 ], dtype=float) ]
xs = [ ]
for i in range(train_size):
x1 = random.gauss(0.0, 0.1)
x2 = random.gauss(0.0, 0.1)
xs.append(centers[i%5] + array([ x1, x2 ], dtype=float))
# Since we are working on the plane, each example and each cluster will have two
# components. We will have five clusters, since we have five centers. The
# K-Means instance is created below.
km = p.KMeans(xs, 5)
for i in range(5):
km.c[i, 0] = 0.3 * cos(i*pi/4)
km.c[i, 1] = 0.3 * sin(i*pi/4)
# The __call__ interface runs the algorithm till completion. It returns the
# centers of the classification. We might pass the parameter imax to the
# algorithm. This is the maximum number of passes. In general, K-Means will
# converge very quickly and with little error. The default value for this
# parameter is 20. Notice, however, that the algorithm automatically stops if
# there are no more changes in the clusters.
c = km()
print "The algorithm converged to the centers:"
print c
print
# If the system has the plot package matplotlib, this tutorial tries to plot
# the training set and the clustered centers. The plot is saved in the file
# ``k-means.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
xs = array(xs)
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
print xs
print c
a1.scatter(xs[:, 0], xs[:, 1], color='black', marker='x')
a1.scatter(c[:, 0], c[:, 1], color='red', marker='o')
savefig("k-means.png")
except ImportError:
pass
|
PaulGrimal/peach
|
tutorial/neural-networks/k-means.py
|
Python
|
lgpl-2.1
| 2,833
|
[
"Gaussian"
] |
db50d71ff53ab2e4a06c774157e2560dcd3739600adbb5e9722c2b3cd6c9b062
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2005-2009 Serge Noiraud
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Adam Stein <[email protected]>
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
ODFDoc : used to generate Open Office Document
"""
#-------------------------------------------------------------------------
#
# pylint : disable messages ...
#
#-------------------------------------------------------------------------
# disable-msg=C0302 # Too many lines in module
# pylint: disable-msg=C0302
# disable-msg # Regular expression which should only match
# pylint: disable-msg=C0103
# disable-msg=R0902 # Too many instance attributes
# pylint: disable-msg=R0902
# disable-msg=R0904 # Too many public methods
# pylint: disable-msg=R0904
# disable-msg=R0912 # Too many branches
# pylint: disable-msg=R0912
# disable-msg=R0913 # Too many arguments
# pylint: disable-msg=R0913
# disable-msg=R0914 # Too many local variables
# pylint: disable-msg=R0914
# disable-msg=R0915 # Too many statements
# pylint: disable-msg=R0915
# warnings :
# disable-msg=W0613 # Unused argument
# pylint: disable-msg=W0613
# errors :
# disable-msg=E1101 # has no member
# pylint: disable-msg=E1101
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
try:
from hashlib import md5
except ImportError:
from md5 import md5
import zipfile
import time
import locale
from cStringIO import StringIO
from math import pi, cos, sin, degrees, radians
from xml.sax.saxutils import escape
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, DrawDoc, graphicstyle,
FONT_SANS_SERIF, SOLID, PAPER_PORTRAIT,
INDEX_TYPE_TOC, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT,
INDEX_TYPE_ALP, PARA_ALIGN_RIGHT, URL_PATTERN,
LOCAL_HYPERLINK, LOCAL_TARGET)
from gramps.gen.plug.docgen.fontscale import string_width
from gramps.plugins.lib.libodfbackend import OdfBackend
from gramps.gen.const import PROGRAM_NAME, VERSION
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.utils.image import image_size, image_dpi, image_actual_size
from gramps.gen.errors import ReportError
#-------------------------------------------------------------------------
#
# internationalization
#
#-------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
_apptype = 'application/vnd.oasis.opendocument.text'
_esc_map = {
'\x1a' : '',
'\x0c' : '',
'\n' : '<text:line-break/>',
'\t' : '<text:tab />',
}
#-------------------------------------------------------------------------
#
# regexp for Styled Notes ...
#
#-------------------------------------------------------------------------
import re
# Hyphen is added because it is used to replace spaces in the font name
NewStyle = re.compile('style-name="([a-zA-Z0-9]*)__([#a-zA-Z0-9 -]*)__">')
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_XMLNS = '''\
xmlns:office="%(urn)soffice:1.0"
xmlns:style="%(urn)sstyle:1.0"
xmlns:text="%(urn)stext:1.0"
xmlns:table="%(urn)stable:1.0"
xmlns:draw="%(urn)sdrawing:1.0"
xmlns:fo="%(urn)sxsl-fo-compatible:1.0"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:meta="%(urn)smeta:1.0"
xmlns:number="%(urn)sdatastyle:1.0"
xmlns:svg="%(urn)ssvg-compatible:1.0"
xmlns:chart="%(urn)schart:1.0"
xmlns:dr3d="%(urn)sdr3d:1.0"
xmlns:math="http://www.w3.org/1998/Math/MathML"
xmlns:form="%(urn)sform:1.0"
xmlns:script="%(urn)sscript:1.0"
xmlns:dom="http://www.w3.org/2001/xml-events"
xmlns:xforms="http://www.w3.org/2002/xforms"
''' % {"urn": "urn:oasis:names:tc:opendocument:xmlns:"}
_FONTS = '''\
<style:font-face style:name="Courier"
svg:font-family="Courier"
style:font-family-generic="modern"
style:font-pitch="fixed"/>
<style:font-face style:name="Times New Roman"
svg:font-family="'Times New Roman'"
style:font-family-generic="roman"
style:font-pitch="variable"/>
<style:font-face style:name="Arial"
svg:font-family="Arial"
style:font-family-generic="swiss"
style:font-pitch="variable"/>
'''
_META_XML = '''\
<?xml version="1.0" encoding="UTF-8"?>
<office:document-meta
xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:meta="urn:oasis:names:tc:opendocument:xmlns:meta:1.0"
office:version="1.0">
<office:meta>
<meta:generator>
%(generator)s
</meta:generator>
<dc:title>
</dc:title>
<dc:subject>
</dc:subject>
<dc:description>
</dc:description>
<meta:initial-creator>
%(creator)s
</meta:initial-creator>
<meta:creation-date>
%(date)s
</meta:creation-date>
<dc:creator>
%(creator)s
</dc:creator>
<dc:date>
%(date)s
</dc:date>
<meta:print-date>0-00-00T00:00:00</meta:print-date>
<dc:language>
%(lang)s
</dc:language>
<meta:editing-cycles>1</meta:editing-cycles>
<meta:editing-duration>PT0S</meta:editing-duration>
<meta:user-defined
meta:name="Genealogical Research and Analysis Management Programming System">
http://gramps-project.org
</meta:user-defined>
<meta:user-defined meta:name="Info 1"/>
<meta:user-defined meta:name="Info 2"/>
<meta:user-defined meta:name="Info 3"/>
</office:meta>
</office:document-meta>
'''
_STYLES = '''\
<style:default-style
style:family="graphic">
<style:graphic-properties
draw:shadow-offset-x="0.3cm"
draw:shadow-offset-y="0.3cm"
draw:start-line-spacing-horizontal="0.283cm"
draw:start-line-spacing-vertical="0.283cm"
draw:end-line-spacing-horizontal="0.283cm"
draw:end-line-spacing-vertical="0.283cm"
style:flow-with-text="true"/>
<style:paragraph-properties
style:text-autospace="ideograph-alpha"
style:line-break="strict"
style:writing-mode="lr-tb"
style:font-independent-line-spacing="false">
<style:tab-stops/>
</style:paragraph-properties>
<style:text-properties
style:use-window-font-color="true"
fo:font-size="12pt"
style:font-size-asian="12pt"
style:language-asian="none"
style:country-asian="none"
style:font-size-complex="12pt"
style:language-complex="none"
style:country-complex="none"/>
</style:default-style>
<style:default-style
style:family="paragraph">
<style:paragraph-properties
style:text-autospace="ideograph-alpha"
style:punctuation-wrap="hanging"
style:line-break="strict"
style:tab-stop-distance="2.205cm"
style:writing-mode="page"/>
<style:text-properties
style:font-name="Times New Roman"
fo:font-size="12pt"
style:font-name-asian="Times New Roman"
style:font-size-asian="12pt"
style:font-name-complex="Times New Roman"
style:font-size-complex="12pt"
style:tab-stop-distance="2.205cm"/>
</style:default-style>
<style:default-style
style:family="table">
<style:table-properties
table:border-model="separating"/>
</style:default-style>
<style:default-style
style:family="table-row">
<style:table-row-properties
fo:keep-together="auto"/>
</style:default-style>
<style:style style:name="Standard"
style:family="paragraph" style:class="text"/>
<style:style style:name="photo"
style:family="graphic">
<style:graphic-properties
text:anchor-type="paragraph"
svg:x="0cm" svg:y="0cm" style:wrap="none"
style:vertical-pos="top"
style:vertical-rel="paragraph-content"
style:horizontal-pos="center"
style:horizontal-rel="paragraph-content"/>
</style:style>
'''
_AUTOMATIC_STYLES = '''\
<style:style style:name="docgen_page_break"
style:family="paragraph"
style:parent-style-name="Standard">
<style:paragraph-properties
fo:break-before="page"/>
</style:style>
<style:style style:name="GSuper"
style:family="text">
<style:text-properties
style:text-position="super 58%"/>
</style:style>
<style:style style:name="GRAMPS-preformat"
style:family="text">
<style:text-properties
style:font-name="Courier"/>
</style:style>
'''
_CLEAR_STYLE = '''\
<style:style style:name="clear"
style:family="graphic">\n
<style:graphic-properties draw:stroke="none"
draw:fill="none" draw:shadow="hidden"
style:run-through="foreground"
style:vertical-pos="from-top"
style:vertical-rel="paragraph"
style:horizontal-pos="from-left"
style:horizontal-rel="paragraph"
draw:wrap-influence-on-position="once-concurrent"
style:flow-with-text="false"/>
</style:style>\n
'''
_OTHER_STYLES = '''\
<style:style style:name="Tbold"
style:family="text">\n
<style:text-properties fo:font-weight="bold"/>\n
</style:style>\n
<style:style style:name="Titalic"
style:family="text">\n
<style:text-properties fo:font-style="italic"/>\n
</style:style>\n
<style:style style:name="Tunderline"
style:family="text">\n
<style:text-properties
style:text-underline-style="solid"
style:text-underline-width="auto"
style:text-underline-color="font-color"/>
</style:style>\n
<style:style style:name="Left"
style:family="graphic"
style:parent-style-name="photo">
<style:graphic-properties
style:run-through="foreground"
style:wrap="dynamic"
style:number-wrapped-paragraphs="no-limit"
style:wrap-contour="false"
style:vertical-pos="from-top"
style:vertical-rel="paragraph-content"
style:horizontal-pos="left"
style:horizontal-rel="paragraph-content"
style:mirror="none" fo:clip="rect(0cm 0cm 0cm 0cm)"
draw:luminance="0%" draw:contrast="0" draw:red="0%"
draw:green="0%" draw:blue="0%" draw:gamma="1"
draw:color-inversion="false"
draw:transparency="-100%"
draw:color-mode="standard"/>
</style:style>\n
<style:style style:name="Right"
style:family="graphic"
style:parent-style-name="photo">
<style:graphic-properties
style:run-through="foreground"
style:wrap="dynamic"
style:number-wrapped-paragraphs="no-limit"
style:wrap-contour="false"
style:vertical-pos="from-top"
style:vertical-rel="paragraph-content"
style:horizontal-pos="right"
style:horizontal-rel="paragraph-content"
style:mirror="none" fo:clip="rect(0cm 0cm 0cm 0cm)"
draw:luminance="0%" draw:contrast="0" draw:red="0%"
draw:green="0%" draw:blue="0%" draw:gamma="1"
draw:color-inversion="false"
draw:transparency="-100%"
draw:color-mode="standard"/>
</style:style>\n
<style:style style:name="Single"
style:family="graphic"
style:parent-style-name="Graphics">
<style:graphic-properties
style:vertical-pos="from-top"
style:mirror="none" fo:clip="rect(0cm 0cm 0cm 0cm)"
draw:luminance="0%" draw:contrast="0" draw:red="0%"
draw:green="0%" draw:blue="0%" draw:gamma="1"
draw:color-inversion="false"
draw:transparency="-100%"
draw:color-mode="standard"/>
</style:style>\n
<style:style style:name="Row" style:family="graphic"
style:parent-style-name="Graphics">
<style:graphic-properties
style:vertical-pos="from-top"
style:vertical-rel="paragraph"
style:horizontal-pos="from-left"
style:horizontal-rel="paragraph"
style:mirror="none" fo:clip="rect(0cm 0cm 0cm 0cm)"
draw:luminance="0%" draw:contrast="0" draw:red="0%"
draw:green="0%" draw:blue="0%" draw:gamma="1"
draw:color-inversion="false"
draw:transparency="-100%"
draw:color-mode="standard"/>
</style:style>\n
'''
_SHEADER_FOOTER = '''\
<style:style style:name="S-Header"
style:family="paragraph"
style:parent-style-name="Standard">
<style:paragraph-properties fo:text-align="center"
style:justify-single-word="false"/>
</style:style>\n
<style:style style:name="S-Footer"
style:family="paragraph"
style:parent-style-name="Header">
<style:paragraph-properties fo:text-align="center"
style:justify-single-word="false"/>
</style:style>\n
'''
_CLICKABLE = r'''<text:a xlink:type="simple" xlink:href="\1">\1</text:a>'''
#-------------------------------------------------------------------------
#
# ODFDoc
#
#-------------------------------------------------------------------------
class ODFDoc(BaseDoc, TextDoc, DrawDoc):
"""
The ODF document class
"""
def __init__(self, styles, ftype):
"""
Class init
"""
BaseDoc.__init__(self, styles, ftype)
self.media_list = []
self.init_called = False
self.cntnt = None
self.cntnt1 = None
self.cntnt2 = None
self.cntntx = None
self.sfile = None
self.mimetype = None
self.meta = None
self.mfile = None
self.stfile = None
self.filename = None
self.lang = None
self._backend = None
self.span = 0
self.level = 0
self.time = "0000-00-00T00:00:00"
self.new_page = 0
self.new_cell = 0
self.page = 0
self.first_page = 1
self.StyleList_notes = [] # styles to create depending on styled notes.
self.StyleList_photos = [] # styles to create depending on clipped images.
def open(self, filename):
"""
Open the new document
"""
t = time.localtime(time.time())
self.time = "%04d-%02d-%02dT%02d:%02d:%02d" % t[:6]
self.filename = filename
if not filename.endswith("odt"):
self.filename += ".odt"
self.filename = os.path.normpath(os.path.abspath(self.filename))
self._backend = OdfBackend()
self.cntnt = StringIO()
self.cntnt1 = StringIO()
self.cntnt2 = StringIO()
def init(self):
"""
Create the document header
"""
assert (not self.init_called)
self.init_called = True
wrt = self.cntnt.write
wrt1, wrt2 = self.cntnt1.write, self.cntnt2.write
current_locale = locale.getlocale()
self.lang = current_locale[0]
self.lang = self.lang.replace('_', '-') if self.lang else "en-US"
self.StyleList_notes = [] # styles to create depending on styled notes.
wrt1('<?xml version="1.0" encoding="UTF-8"?>\n'
'<office:document-content\n' +
_XMLNS +
'office:version="1.0">\n' +
'<office:scripts/>\n'
)
wrt1('<office:font-face-decls>\n' +
_FONTS
)
wrt2(
'</office:font-face-decls>\n' +
'<office:automatic-styles>\n' +
_AUTOMATIC_STYLES
)
styles = self.get_style_sheet()
for style_name in styles.get_draw_style_names():
style = styles.get_draw_style(style_name)
wrt(
'<style:style ' +
'style:name="%s" ' % style_name +
'style:family="graphic">\n'
'<style:graphic-properties '
)
if style.get_line_width():
wrt(
'svg:stroke-width="%.2f" '
% (style.get_line_width()*10) +
'draw:marker-start="" '
'draw:marker-start-width="0.0" '
'draw:marker-end-width="0.0" '
'draw:textarea-horizontal-align="center" '
'draw:textarea-vertical-align="middle" '
)
if style.get_line_style() != SOLID:
#wrt('svg:fill-color="#ff0000" ')
wrt('draw:stroke="dash" draw:stroke-dash="gramps_%s" ' % style.get_dash_style_name())
else:
wrt('draw:stroke="solid" ')
else:
wrt(
'draw:stroke="none" '
'draw:stroke-color="#000000" '
)
wrt(
'svg:fill-color="#%02x%02x%02x" '
% style.get_color() +
'draw:fill-color="#%02x%02x%02x" '
% style.get_fill_color() +
'draw:shadow="hidden" '
'style:run-through="foreground" '
'style:vertical-pos="from-top" '
'style:vertical-rel="paragraph" '
'style:horizontal-pos="from-left" '
'style:horizontal-rel="paragraph" '
'draw:wrap-influence-on-position='
'"once-concurrent" '
'style:flow-with-text="false" '
'/>\n'
'</style:style>\n'
)
wrt(
'<style:style '
'style:name="%s_shadow" ' % style_name +
'style:family="graphic">\n'
'<style:graphic-properties '
'draw:stroke="none" '
'draw:fill="solid" '
'draw:fill-color="#cccccc" '
'draw:textarea-horizontal-align="center" '
'draw:textarea-vertical-align="middle" '
'draw:shadow="hidden" '
'style:run-through="foreground" '
'style:vertical-pos="from-top" '
'style:vertical-rel="paragraph" '
'style:horizontal-pos="from-left" '
'style:horizontal-rel="paragraph" '
'draw:wrap-influence-on-position='
'"once-concurrent" '
'style:flow-with-text="false" '
'/>\n'
'</style:style>\n'
)
# Graphic style for items with a clear background
wrt(
_CLEAR_STYLE
)
for style_name in styles.get_paragraph_style_names():
style = styles.get_paragraph_style(style_name)
wrt(
'<style:style style:name="NL%s" ' % style_name +
'style:family="paragraph" ' +
'style:parent-style-name="%s">\n' % style_name +
'<style:paragraph-properties ' +
'fo:break-before="page"/>\n' +
'</style:style>\n' +
'<style:style style:name="X%s" ' % style_name +
'style:family="paragraph"' +
'>\n' +
'<style:paragraph-properties '
)
if style.get_padding() != 0.0:
wrt('fo:padding="%.2fcm" ' % style.get_padding())
if style.get_header_level() > 0:
wrt('fo:keep-with-next="auto" ')
align = style.get_alignment()
if align == PARA_ALIGN_LEFT:
wrt('fo:text-align="start" ')
elif align == PARA_ALIGN_RIGHT:
wrt('fo:text-align="end" ')
elif align == PARA_ALIGN_CENTER:
wrt(
'fo:text-align="center" '
'style:justify-single-word="false" '
)
else:
wrt(
'fo:text-align="justify" '
'style:justify-single-word="false" '
)
font = style.get_font()
wrt('style:font-name="%s" ' %
("Arial"
if font.get_type_face() == FONT_SANS_SERIF else
"Times New Roman")
)
wrt(
'fo:font-size="%.2fpt" ' % font.get_size() +
'style:font-size-asian="%.2fpt" ' % font.get_size() +
'fo:color="#%02x%02x%02x" ' % font.get_color()
)
if font.get_bold():
wrt('fo:font-weight="bold" ')
if font.get_italic():
wrt('fo:font-style="italic" ')
if font.get_underline():
wrt(
'style:text-underline="single" '
'style:text-underline-color="font-color" '
)
wrt(
'fo:text-indent="%.2fcm"\n' % style.get_first_indent() +
'fo:margin-right="%.2fcm"\n' % style.get_right_margin() +
'fo:margin-left="%.2fcm"\n' % style.get_left_margin() +
'fo:margin-top="%.2fcm"\n' % style.get_top_margin() +
'fo:margin-bottom="%.2fcm"\n' % style.get_bottom_margin() +
'/>\n' +
'</style:style>\n'
)
wrt(
'<style:style style:name="F%s" ' % style_name +
'style:family="text">\n' +
'<style:text-properties '
)
align = style.get_alignment()
if align == PARA_ALIGN_LEFT:
wrt('fo:text-align="start" ')
elif align == PARA_ALIGN_RIGHT:
wrt('fo:text-align="end" ')
elif align == PARA_ALIGN_CENTER:
wrt(
'fo:text-align="center" '
'style:justify-single-word="false" '
)
font = style.get_font()
wrt('style:font-name="%s" ' %
("Arial"
if font.get_type_face() == FONT_SANS_SERIF else
"Times New Roman")
)
color = font.get_color()
wrt('fo:color="#%02x%02x%02x" ' % color)
if font.get_bold():
wrt('fo:font-weight="bold" ')
if font.get_italic():
wrt('fo:font-style="italic" ')
wrt(
'fo:font-size="%.2fpt" ' % font.get_size() +
'style:font-size-asian="%.2fpt"/> ' % font.get_size() +
'</style:style>\n'
)
for style_name in styles.get_table_style_names():
style = styles.get_table_style(style_name)
table_width = float(self.get_usable_width())
table_width_str = "%.2f" % table_width
wrt(
'<style:style style:name="%s" ' % style_name +
'style:family="table-properties">\n'
'<style:table-properties-properties ' +
'style:width="%scm" ' % table_width_str +
'/>\n' +
'</style:style>\n'
)
for col in range(0, min(style.get_columns(), 50)):
width = table_width * float(style.get_column_width(col) / 100.0)
width_str = "%.4f" % width
wrt(
'<style:style style:name="%s.%s" '
% (style_name, chr(ord('A')+col)) +
'style:family="table-column">' +
'<style:table-column-properties ' +
'style:column-width="%scm"/>' % width_str +
'</style:style>\n'
)
for cell in styles.get_cell_style_names():
cell_style = styles.get_cell_style(cell)
wrt(
'<style:style style:name="%s" ' % cell +
'style:family="table-cell">\n' +
'<style:table-cell-properties' +
' fo:padding="%.2fcm"' % cell_style.get_padding()
)
wrt(' fo:border-top="%s"' %
("0.002cm solid #000000"
if cell_style.get_top_border() else
"none")
)
wrt(' fo:border-bottom="%s"' %
("0.002cm solid #000000"
if cell_style.get_bottom_border() else
"none")
)
wrt(' fo:border-left="%s"' %
("0.002cm solid #000000"
if cell_style.get_left_border() else
"none")
)
wrt(' fo:border-right="%s"' %
("0.002cm solid #000000"
if cell_style.get_right_border() else
"none")
)
wrt(
'/>\n'
'</style:style>\n'
)
wrt(
_OTHER_STYLES
)
wrt(
'</office:automatic-styles>\n'
'<office:body>\n'
' <office:text>\n'
' <office:forms '
'form:automatic-focus="false" '
'form:apply-design-mode="false"/>\n'
)
def uniq(self, list_, funct=None):
"""
        We want no duplicates in the list
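        For example, uniq([('a', 1), ('b', 2), ('a', 3)]) keeps only the first
        ('a', ...) item and returns [('a', 1), ('b', 2)].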
"""
# order preserving
funct = funct or (lambda x:x)
seen = set()
result = []
for item in list_:
marker = funct(item[0])
if marker in seen:
continue
seen.add(marker)
result.append(item)
return result
def finish_cntnt_creation(self):
"""
We have finished the document.
So me must integrate the new fonts and styles where they should be.
The content.xml file is closed.
"""
self.cntntx = StringIO()
self.StyleList_notes = self.uniq(self.StyleList_notes)
self.add_styled_notes_fonts()
self.add_styled_notes_styles()
self.add_styled_photo_styles()
self.cntntx.write(self.cntnt1.getvalue())
self.cntntx.write(self.cntnt2.getvalue())
self.cntntx.write(self.cntnt.getvalue())
self.cntnt1.close()
self.cntnt2.close()
self.cntnt.close()
def close(self):
"""
Close the document and create the odt file
"""
self.cntnt.write(
'</office:text>\n'
'</office:body>\n'
'</office:document-content>\n'
)
self.finish_cntnt_creation()
self._write_styles_file()
self._write_manifest()
self._write_settings()
self._write_meta_file()
self._write_mimetype_file()
self._write_zip()
def add_styled_notes_fonts(self):
"""
Add the new fonts for Styled notes in the font-face-decls section.
"""
# Need to add new font for styled notes here.
wrt1 = self.cntnt1.write
for style in self.StyleList_notes:
if style[1] == "FontFace":
                # Restore any spaces that were replaced by hyphens in
# libodfbackend
wrt1(
'<style:font-face ' +
' style:name="%s"\n' % style[2].replace("-", " ") +
                    ' svg:font-family="&apos;%s&apos;"\n' %
style[2].replace("-", " ") +
' style:font-pitch="fixed"/>\n\n'
)
def add_styled_notes_styles(self):
"""
Add the new styles for Styled notes in the automatic-styles section.
"""
# Need to add new style for styled notes here.
wrt2 = self.cntnt2.write
for style in self.StyleList_notes:
if style[1] == "FontSize":
wrt2(
'<style:style ' +
'style:name="FontSize__%s__"\n' % style[2] +
' style:family="text">\n' +
' <style:text-properties\n' +
' fo:font-size="%spt"\n' % style[2] +
' style:font-size-asian="%spt"\n' % style[2] +
' style:font-size-complex="%spt"/>\n' % style[2] +
'</style:style>\n\n'
)
elif style[1] == "FontColor":
# Restore the hash at the start that was removed by
# libodfbackend
wrt2(
'<style:style ' +
'style:name="FontColor__%s__"\n' % style[2] +
' style:family="text">\n' +
' <style:text-properties\n' +
' fo:color="#%s"/>\n' % style[2] +
'</style:style>\n\n'
)
elif style[1] == "FontHighlight":
wrt2(
'<style:style ' +
'style:name="FontHighlight__%s__"\n' % style[2] +
' style:family="text">\n' +
' <style:text-properties\n' +
' fo:background-color="#%s"/>\n' % style[2] +
'</style:style>\n\n'
)
elif style[1] == "FontFace":
                # Restore any spaces that were replaced by hyphens in
# libodfbackend
wrt2(
'<style:style ' +
'style:name="FontFace__%s__"\n' % style[2] +
' style:family="text">\n' +
' <style:text-properties\n' +
' style:font-name="%s"\n' %
style[2].replace("-", " ") +
' style:font-pitch="variable"/>\n' +
'</style:style>\n\n'
)
def add_styled_photo_styles(self):
"""
Add the new styles for clipped images in the automatic-styles section.
"""
wrt2 = self.cntnt2.write
for style in self.StyleList_photos:
if style[0] == "Left":
wrt2(
'<style:style ' +
'style:name="Left_%s" ' % str(style[1]) +
'style:family="graphic" ' +
'style:parent-style-name="photo">' +
'<style:graphic-properties ' +
'style:run-through="foreground" ' +
'style:wrap="dynamic" ' +
'style:number-wrapped-paragraphs="no-limit" ' +
'style:wrap-contour="false" ' +
'style:vertical-pos="from-top" ' +
'style:vertical-rel="paragraph-content" ' +
'style:horizontal-pos="left" ' +
'style:horizontal-rel="paragraph-content" ' +
'style:mirror="none" ' +
'fo:clip="rect(%fin %fin %fin %fin)" ' % style[1] +
'draw:luminance="0%" ' +
'draw:contrast="0" ' +
'draw:red="0%" ' +
'draw:green="0%" ' +
'draw:blue="0%" ' +
'draw:gamma="1" ' +
'draw:color-inversion="false" ' +
'draw:transparency="-100%" ' +
'draw:color-mode="standard"/>' +
'</style:style>\n'
)
elif style[0] == "Right":
wrt2(
'<style:style ' +
'style:name="Right_%s" ' % str(style[1]) +
'style:family="graphic" ' +
'style:parent-style-name="photo">' +
'<style:graphic-properties ' +
'style:run-through="foreground" ' +
'style:wrap="dynamic" ' +
'style:number-wrapped-paragraphs="no-limit" ' +
'style:wrap-contour="false" ' +
'style:vertical-pos="from-top" ' +
'style:vertical-rel="paragraph-content" ' +
'style:horizontal-pos="right" ' +
'style:horizontal-rel="paragraph-content" ' +
'style:mirror="none" ' +
'fo:clip="rect(%fin %fin %fin %fin)" ' % style[1] +
'draw:luminance="0%" ' +
'draw:contrast="0" ' +
'draw:red="0%" ' +
'draw:green="0%" ' +
'draw:blue="0%" ' +
'draw:gamma="1" ' +
'draw:color-inversion="false" ' +
'draw:transparency="-100%" ' +
'draw:color-mode="standard"/>' +
'</style:style>\n'
)
elif style[0] == "Single":
wrt2(
'<style:style ' +
'style:name="Single_%s" ' % str(style[1]) +
'style:family="graphic" ' +
'<style:graphic-properties ' +
'style:vertical-pos="from-top" ' +
'style:mirror="none" ' +
'fo:clip="rect(%fin %fin %fin %fin)" ' % style[1] +
'draw:luminance="0%" ' +
'draw:contrast="0" ' +
'draw:red="0%" ' +
'draw:green="0%" ' +
'draw:blue="0%" ' +
'draw:gamma="1" ' +
'draw:color-inversion="false" ' +
'draw:transparency="-100%" ' +
'draw:color-mode="standard"/>' +
'</style:style>\n'
)
else:
wrt2(
'<style:style ' +
'style:name="Row_%s" ' % str(style[1]) +
'style:family="graphic" ' +
'style:parent-style-name="Graphics">' +
'<style:graphic-properties ' +
'style:vertical-pos="from-top" ' +
'style:vertical-rel="paragraph" ' +
'style:horizontal-pos="from-left" ' +
'style:horizontal-rel="paragraph" ' +
'style:mirror="none" ' +
'fo:clip="rect(%fin %fin %fin %fin)" ' % style[1] +
'draw:luminance="0%" ' +
'draw:contrast="0" ' +
'draw:red="0%" ' +
'draw:green="0%" ' +
'draw:blue="0%" ' +
'draw:gamma="1" ' +
'draw:color-inversion="false" ' +
'draw:transparency="-100%" ' +
'draw:color-mode="standard"/>' +
'</style:style>\n'
)
def add_media_object(self, file_name, pos, x_cm, y_cm, alt='', style_name=None, crop=None):
"""
Add multi-media documents : photos
"""
# try to open the image. If the open fails, it probably wasn't
# a valid image (could be a PDF, or a non-image)
(x, y) = image_size(file_name)
if (x, y) == (0, 0):
return
not_extension, extension = os.path.splitext(file_name)
odf_name = md5(file_name).hexdigest() + extension
media_list_item = (file_name, odf_name)
if not media_list_item in self.media_list:
self.media_list.append(media_list_item)
base = escape(os.path.basename(file_name))
tag = base.replace('.', '_')
if self.new_cell:
self.cntnt.write('<text:p>')
pos = pos.title() if pos in ['left', 'right', 'single'] else 'Row'
if crop:
dpi = image_dpi(file_name)
if dpi:
(act_width, act_height) = image_actual_size(
x_cm, y_cm, crop[2] - crop[0], crop[3] - crop[1]
)
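                # The crop values are percentages of the full image; convert the
                # clipped-off borders to inches (pixels / dpi) so they can be fed
                # into the fo:clip="rect(top right bottom left)" attribute below.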
left = ((crop[0]/100.0)*x)/dpi[0]
right = (x - ((crop[2]/100.0)*x))/dpi[0]
top = ((crop[1]/100.0)*y)/dpi[1]
bottom = (y - ((crop[3]/100.0)*y))/dpi[1]
crop = (top, right, bottom, left)
self.StyleList_photos.append(
[pos, crop]
)
pos += "_" + str(crop)
else:
(act_width, act_height) = image_actual_size(x_cm, y_cm, x, y)
else:
(act_width, act_height) = image_actual_size(x_cm, y_cm, x, y)
if len(alt):
self.cntnt.write(
'<draw:frame draw:style-name="%s" ' % pos +
'draw:name="caption_%s" ' % tag +
'text:anchor-type="paragraph" ' +
'svg:y="0in" ' +
'svg:width="%.2fcm" ' % act_width +
'draw:z-index="34"> ' +
'<draw:text-box fo:min-height="%.2fcm"> ' % act_height +
'<text:p text:style-name="%s">' % style_name
)
self.cntnt.write(
'<draw:frame draw:style-name="%s" ' % pos +
'draw:name="%s" ' % tag +
'text:anchor-type="paragraph" ' +
'svg:width="%.2fcm" ' % act_width +
'svg:height="%.2fcm" ' % act_height +
'draw:z-index="1" >' +
'<draw:image xlink:href="Pictures/%s" ' % odf_name +
'xlink:type="simple" xlink:show="embed" ' +
'xlink:actuate="onLoad"/>\n' +
'</draw:frame>\n'
)
if len(alt):
self.cntnt.write(
'%s' % '<text:line-break/>'.join(alt) +
'</text:p>' +
'</draw:text-box>' +
'</draw:frame>'
)
if self.new_cell:
self.cntnt.write('</text:p>\n')
def start_table(self, name, style_name):
"""
open a table
"""
self.cntnt.write(
'<table:table table:name="%s" ' % name +
'table:style-name="%s">\n' % style_name
)
styles = self.get_style_sheet()
table = styles.get_table_style(style_name)
for col in range(table.get_columns()):
self.cntnt.write(
'<table:table-column table:style-name="%s.%s"/>\n'
% (style_name, str(chr(ord('A')+col)))
)
def end_table(self):
"""
close a table
"""
self.cntnt.write('</table:table>\n')
def start_row(self):
"""
open a row
"""
self.cntnt.write('<table:table-row>\n')
def end_row(self):
"""
close a row
"""
self.cntnt.write('</table:table-row>\n')
def start_cell(self, style_name, span=1):
"""
open a cell
"""
self.span = span
self.cntnt.write(
'<table:table-cell table:style-name="%s" ' % style_name +
'table:value-type="string"'
)
if span > 1:
self.cntnt.write(' table:number-columns-spanned="%s">\n' % span)
else:
self.cntnt.write('>\n')
self.new_cell = 1
def end_cell(self):
"""
close a cell
"""
self.cntnt.write('</table:table-cell>\n')
#for col in range(1, self.span):
# self.cntnt.write('<table:covered-table-cell/>\n')
self.new_cell = 0
def start_bold(self):
"""
open bold
"""
self.cntnt.write('<text:span text:style-name="Tbold">')
def end_bold(self):
"""
close bold
"""
self.cntnt.write('</text:span>')
def start_superscript(self):
"""
open superscript
"""
self.cntnt.write('<text:span text:style-name="GSuper">')
def end_superscript(self):
"""
close superscript
"""
self.cntnt.write('</text:span>')
def _add_zip(self, zfile, name, data, t):
"""
Add a zip file to an archive
"""
zipinfo = zipfile.ZipInfo(name.encode('utf-8'))
zipinfo.date_time = t
zipinfo.compress_type = zipfile.ZIP_DEFLATED
zipinfo.external_attr = 0644 << 16L
zfile.writestr(zipinfo, data)
def _write_zip(self):
"""
Create the odt file. This is a zip file
"""
try:
zfile = zipfile.ZipFile(self.filename, "w", zipfile.ZIP_DEFLATED)
except IOError, msg:
errmsg = "%s\n%s" % (_("Could not create %s") % self.filename, msg)
raise ReportError(errmsg)
except:
raise ReportError(_("Could not create %s") % self.filename)
t = time.localtime(time.time())[:6]
self._add_zip(zfile, "META-INF/manifest.xml", self.mfile.getvalue(), t)
self._add_zip(zfile, "content.xml", self.cntntx.getvalue(), t)
self._add_zip(zfile, "meta.xml", self.meta.getvalue(), t)
self._add_zip(zfile, "settings.xml", self.stfile.getvalue(), t)
self._add_zip(zfile, "styles.xml", self.sfile.getvalue(), t)
self._add_zip(zfile, "mimetype", self.mimetype.getvalue(), t)
self.mfile.close()
self.cntnt.close()
self.meta.close()
self.stfile.close()
self.sfile.close()
self.mimetype.close()
for image in self.media_list:
try:
ifile = open(image[0], mode='rb')
self._add_zip(zfile, "Pictures/%s" % image[1], ifile.read(), t)
ifile.close()
            except IOError, msg:
                errmsg = "%s\n%s" % (_("Could not open %s") % image[0],
                                     msg)
                raise ReportError(errmsg)
            except:
                raise ReportError(_("Could not open %s") % image[0])
zfile.close()
def _write_styles_file(self):
"""
create the styles.xml file
"""
self.sfile = StringIO()
wrtf = self.sfile.write
wrtf('<?xml version="1.0" encoding="UTF-8"?>\n')
wrtf('<office:document-styles ' +
_XMLNS +
'office:version="1.0">\n'
)
wrtf('<office:font-face-decls>\n' +
_FONTS +
'</office:font-face-decls>\n'
)
wrtf('<office:styles>\n' +
_STYLES
)
styles = self.get_style_sheet()
for style_name in styles.get_paragraph_style_names():
style = styles.get_paragraph_style(style_name)
wrtf(
'<style:style style:name="%s" ' % style_name +
'style:family="paragraph" ' +
'style:parent-style-name="Standard" ' +
'style:class="text">\n' +
'<style:paragraph-properties\n' +
'fo:margin-left="%.2fcm"\n'
% style.get_left_margin() +
'fo:margin-right="%.2fcm"\n'
% style.get_right_margin() +
'fo:margin-top="%.2fcm"\n'
% style.get_top_margin() +
'fo:margin-bottom="%.2fcm"\n'
% style.get_bottom_margin()
)
if style.get_padding() != 0.0:
wrtf('fo:padding="%.2fcm" ' % style.get_padding())
if style.get_header_level() > 0:
wrtf('fo:keep-with-next="auto" ')
align = style.get_alignment()
if align == PARA_ALIGN_LEFT:
wrtf(
'fo:text-align="start" '
'style:justify-single-word="false" '
)
elif align == PARA_ALIGN_RIGHT:
wrtf('fo:text-align="end" ')
elif align == PARA_ALIGN_CENTER:
wrtf(
'fo:text-align="center" '
'style:justify-single-word="false" '
)
else:
wrtf(
'fo:text-align="justify" '
'style:justify-single-word="false" '
)
wrtf(
'fo:text-indent="%.2fcm" ' % style.get_first_indent() +
'style:auto-text-indent="false"/> ' +
'<style:text-properties '
)
font = style.get_font()
color = font.get_color()
wrtf('fo:color="#%02x%02x%02x" ' % color)
wrtf('style:font-name="%s" ' %
("Arial"
if font.get_type_face() == FONT_SANS_SERIF else
"Times New Roman")
)
wrtf('fo:font-size="%.0fpt" ' % font.get_size())
if font.get_italic():
wrtf('fo:font-style="italic" ')
if font.get_bold():
wrtf('fo:font-weight="bold" ')
if font.get_underline():
wrtf(
'style:text-underline="single" ' +
'style:text-underline-color="font-color" ' +
'fo:text-indent="%.2fcm" ' % style.get_first_indent() +
'fo:margin-right="%.2fcm" ' % style.get_right_margin() +
'fo:margin-left="%.2fcm" ' % style.get_left_margin() +
'fo:margin-top="%.2fcm" ' % style.get_top_margin() +
'fo:margin-bottom="%.2fcm"\n' % style.get_bottom_margin()
)
wrtf(
'/>\n'
'</style:style>\n'
)
# Dash lengths are based on the OpenOffice Ultrafine Dashed line style.
for line_style in graphicstyle.line_style_names:
dash_array = graphicstyle.get_line_style_by_name(line_style)
wrtf('<draw:stroke-dash draw:name="gramps_%s" draw:style="rect" '
'draw:dots1="%d" draw:dots1-length="0.102cm" '
'draw:dots2="%d" draw:dots2-length="0.102cm" '
'draw:distance="%5.3fcm" />\n' % (line_style, dash_array[0], dash_array[0], dash_array[1] * 0.051))
        # Currently there is no leading number format for headers
#wrtf('<text:outline-style>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="1" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="2" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="3" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="4" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="5" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="6" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="7" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="8" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="9" style:num-format=""/>\n')
#wrtf('<text:outline-level-style ')
#wrtf('text:level="10" style:num-format=""/>\n')
#wrtf('</text:outline-style>\n')
wrtf(
'<text:notes-configuration '
'text:note-class="footnote" '
'style:num-format="1" '
'text:start-value="0" '
'text:footnotes-position="page" '
'text:start-numbering-at="document"/> '
)
wrtf(
'<text:notes-configuration '
'text:note-class="endnote" '
'style:num-format="i" '
'text:start-value="0"/> '
)
wrtf(
'<text:linenumbering-configuration '
'text:number-lines="false" '
'text:offset="0.499cm" '
'style:num-format="1" '
'text:number-position="left" '
'text:increment="5"/> '
)
wrtf('</office:styles>\n')
wrtf(
'<office:automatic-styles>\n' +
_SHEADER_FOOTER +
'<style:page-layout style:name="pm1">\n' +
'<style:page-layout-properties ' +
'fo:page-width="%.2fcm" '
% self.paper.get_size().get_width() +
'fo:page-height="%.2fcm" '
% self.paper.get_size().get_height() +
'style:num-format="1" '
)
wrtf('style:print-orientation="%s" ' %
("portrait"
if self.paper.get_orientation() == PAPER_PORTRAIT else
"landscape")
)
wrtf(
'fo:margin-top="%.2fcm" '
% self.paper.get_top_margin() +
'fo:margin-bottom="%.2fcm" '
% self.paper.get_bottom_margin() +
'fo:margin-left="%.2fcm" '
% self.paper.get_left_margin() +
'fo:margin-right="%.2fcm" '
% self.paper.get_right_margin() +
'style:writing-mode="lr-tb" ' +
'style:footnote-max-height="0cm">\n' +
'<style:footnote-sep style:width="0.018cm" ' +
'style:distance-before-sep="0.101cm" ' +
'style:distance-after-sep="0.101cm" ' +
'style:adjustment="left" style:rel-width="25%" ' +
'style:color="#000000"/>\n' +
'</style:page-layout-properties>\n'
)
# header
wrtf(
'<style:header-style>\n'
'<style:header-footer-properties '
'fo:min-height="0cm" fo:margin-bottom="0.499cm"/>\n'
'</style:header-style>\n'
)
# footer
wrtf(
'<style:footer-style>\n'
'<style:header-footer-properties '
'fo:min-height="0cm" fo:margin-bottom="0.499cm"/>\n'
'</style:footer-style>\n'
)
# End of page layout
wrtf(
'</style:page-layout>\n'
'</office:automatic-styles>\n'
)
# Master Styles
wrtf(
'<office:master-styles>\n'
'<style:master-page style:name="Standard" '
'style:page-layout-name="pm1">\n'
# header
#'<style:header>'
#'<text:p text:style-name="S-Header">'
# How to get the document title here ?
#' TITRE : %s' % self.title
#'</text:p>'
#'</style:header>'
# footer
#'<style:footer>'
#'<text:p text:style-name="S-Footer">'
#'<text:page-number text:select-page="current">1'
#'</text:page-number>/'
#'<text:page-count>1'
#'</text:page-count>'
#'</text:p>'
#'</style:footer>'
#
'</style:master-page>'
'</office:master-styles>\n'
)
# End of document styles
wrtf('</office:document-styles>\n')
def page_break(self):
"""
prepare a new page
"""
self.new_page = 1
def start_page(self):
"""
create a new page
"""
self.cntnt.write('<text:p text:style-name="docgen_page_break">\n')
def end_page(self):
"""
close the page
"""
self.cntnt.write('</text:p>\n')
def start_paragraph(self, style_name, leader=None):
"""
open a new paragraph
"""
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
self.level = style.get_header_level()
if self.new_page == 1:
self.new_page = 0
name = "NL%s" % style_name
else:
name = style_name
if self.level == 0:
self.cntnt.write('<text:p text:style-name="%s">' % name)
else:
self.cntnt.write(
'<text:h text:style-name="%s"' % name +
' text:outline-level="%s">' % str(self.level)
)
if leader is not None:
self.cntnt.write(leader + '<text:tab/>')
self.new_cell = 0
def end_paragraph(self):
"""
close a paragraph
"""
self.cntnt.write(
'</text:%s>\n' % ('p' if self.level == 0 else 'h')
)
self.new_cell = 1
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the ODF doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. ODFDoc prints the html without handling it
links: bool, make URLs clickable if True
"""
text = str(styledtext)
s_tags = styledtext.get_tags()
markuptext = self._backend.add_markup_from_styled(text, s_tags, '\n')
if links == True:
markuptext = re.sub(URL_PATTERN, _CLICKABLE, markuptext)
# we need to know if we have new styles to add.
# if markuptext contains : FontColor, FontFace, FontSize ...
# we must prepare the new styles for the styles.xml file.
# We are looking for the following format :
        # style-name="([a-zA-Z0-9]*)__([#a-zA-Z0-9 -]*)__">
# The first element is the StyleType and the second one is the value
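        # For instance (illustrative only), a span written by libodfbackend such
        # as  style-name="FontSize__12__">  matches with m.group(1) == 'FontSize'
        # and m.group(2) == '12'.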
start = 0
while 1:
m = NewStyle.search(markuptext, start)
if not m:
break
self.StyleList_notes.append([m.group(1)+m.group(2),
m.group(1),
m.group(2)])
start = m.end()
linenb = 1
self.start_paragraph(style_name)
for line in markuptext.split('\n'):
[line, sigcount] = process_spaces(line, format)
if sigcount == 0:
self.end_paragraph()
self.start_paragraph(style_name)
linenb = 1
else:
if ( linenb > 1 ):
self.cntnt.write('<text:line-break/>')
self.cntnt.write(line)
linenb += 1
self.end_paragraph()
def write_text(self, text, mark=None, links=False):
"""
Uses the xml.sax.saxutils.escape function to convert XML
entities. The _esc_map dictionary allows us to add our own
mappings.
@param mark: IndexMark to use for indexing
"""
text = escape(text, _esc_map)
if links == True:
text = re.sub(URL_PATTERN, _CLICKABLE, text)
self._write_mark(mark, text)
self.cntnt.write(text)
def _write_mark(self, mark, text):
"""
Insert a mark at this point in the document.
"""
if mark:
key = escape(mark.key, _esc_map)
            key = key.replace('"', '&quot;')
if mark.type == INDEX_TYPE_ALP:
self.cntnt.write(
'<text:alphabetical-index-mark '
'text:string-value="%s" />' % key
)
elif mark.type == INDEX_TYPE_TOC:
self.cntnt.write(
'<text:toc-mark ' +
'text:string-value="%s" ' % key +
'text:outline-level="%d" />' % mark.level
)
elif mark.type == LOCAL_HYPERLINK:
self.cntnt.write(
'<text:a xlink:type="simple" xlink:href="%s">' % key)
self.cntnt.write(text)
self.cntnt.write('</text:a>')
return
elif mark.type == LOCAL_TARGET:
self.cntnt.write(
'<text:bookmark text:name="%s"/>' % key)
def insert_toc(self):
"""
Insert a Table of Contents at this point in the document.
"""
title = _('Contents')
self.cntnt.write('<text:table-of-content>')
self.cntnt.write('<text:table-of-content-source ' +
'text:outline-level="3" ' +
'text:use-outline-level="false">')
self.cntnt.write('<text:index-title-template ' +
'text:style-name="TOC-Title">' + title)
self.cntnt.write('</text:index-title-template>')
for level in range(1, 4):
self.cntnt.write('<text:table-of-content-entry-template ' +
'text:outline-level="%d" ' % level +
'text:style-name="TOC-Heading%d">' % level)
self.cntnt.write('<text:index-entry-chapter/>')
self.cntnt.write('<text:index-entry-text/>')
self.cntnt.write('<text:index-entry-tab-stop ' +
'style:type="right" ' +
'style:leader-char="."/>')
self.cntnt.write('<text:index-entry-page-number/>')
self.cntnt.write('</text:table-of-content-entry-template>')
self.cntnt.write('</text:table-of-content-source>')
self.cntnt.write('<text:index-body>')
self.cntnt.write('<text:index-title>')
self.cntnt.write('<text:p text:style-name="NLTOC-Title">%s</text:p>' %
title)
self.cntnt.write('</text:index-title>')
self.cntnt.write('</text:index-body>')
self.cntnt.write('</text:table-of-content>')
def insert_index(self):
"""
Insert an Alphabetical Index at this point in the document.
"""
title = _('Index')
self.cntnt.write('<text:alphabetical-index>')
self.cntnt.write('<text:alphabetical-index-source ' +
'text:ignore-case="true" ' +
'text:combine-entries="false" ' +
                         'text:combine-entries-with-pp="false">')
self.cntnt.write('<text:index-title-template ' +
'text:style-name="IDX-Title">' + title)
self.cntnt.write('</text:index-title-template>')
self.cntnt.write('<text:alphabetical-index-entry-template ' +
'text:outline-level="1" ' +
'text:style-name="IDX-Entry">')
self.cntnt.write('<text:index-entry-text/>')
self.cntnt.write('<text:index-entry-tab-stop ' +
'style:type="right" ' +
'style:leader-char="."/>')
self.cntnt.write('<text:index-entry-page-number/>')
self.cntnt.write('</text:alphabetical-index-entry-template>')
self.cntnt.write('</text:alphabetical-index-source>')
self.cntnt.write('<text:index-body>')
self.cntnt.write('<text:index-title>')
self.cntnt.write('<text:p text:style-name="NLIDX-Title">%s</text:p>' %
title)
self.cntnt.write('</text:index-title>')
self.cntnt.write('</text:index-body>')
self.cntnt.write('</text:alphabetical-index>')
def _write_manifest(self):
"""
create the manifest.xml file
"""
self.mfile = StringIO()
# Header
self.mfile.write(
'<?xml version="1.0" encoding="UTF-8"?>\n' +
'<manifest:manifest ' +
'xmlns:manifest="urn:oasis:names:tc:opendocument' +
':xmlns:manifest:1.0">' +
'<manifest:file-entry ' +
'manifest:media-type="%s" ' % _apptype +
'manifest:full-path="/"/>'
)
# Images
for image in self.media_list:
self.mfile.write(
'<manifest:file-entry manifest:media-type="" ' +
'manifest:full-path="Pictures/' +
image[1] +
'"/>'
)
# Footer
self.mfile.write(
'<manifest:file-entry manifest:media-type="" '
'manifest:full-path="Pictures/"/>'
'<manifest:file-entry manifest:media-type="text/xml" '
'manifest:full-path="content.xml"/>'
'<manifest:file-entry manifest:media-type="text/xml" '
'manifest:full-path="styles.xml"/>'
'<manifest:file-entry manifest:media-type="text/xml" '
'manifest:full-path="settings.xml"/>'
'<manifest:file-entry manifest:media-type="text/xml" '
'manifest:full-path="meta.xml"/>'
'</manifest:manifest>\n'
)
def _write_settings(self):
"""
create the settings.xml file
"""
self.stfile = StringIO()
# This minimal settings file has been taken from
# http://mashupguide.net/1.0/html/ch17s03.xhtml (Creative commons
# licence): http://mashupguide.net/1.0/html/apas02.xhtml
self.stfile.write(
'<?xml version="1.0" encoding="UTF-8"?>\n' +
'<office:document-settings office:version="1.0"\n' +
'xmlns:config="urn:oasis:names:tc:opendocument:xmlns:config:1.0"\n' +
'xmlns:office="urn:oasis:names:tc:opendocument:xmlns:office:1.0"\n' +
'xmlns:ooo="http://openoffice.org/2004/office"\n' +
'xmlns:xlink="http://www.w3.org/1999/xlink" />'
)
def _write_mimetype_file(self):
"""
create the mimetype.xml file
"""
self.mimetype = StringIO()
self.mimetype.write('application/vnd.oasis.opendocument.text')
def _write_meta_file(self):
"""
create the meta.xml file
"""
self.meta = StringIO()
generator = PROGRAM_NAME + ' ' + VERSION
creator = self.get_creator()
date = self.time
lang = self.lang
self.meta.write(
_META_XML % locals()
)
def rotate_text(self, style, text, x, y, angle, mark=None):
"""
Used to rotate a text with an angle.
@param mark: IndexMark to use for indexing
"""
style_sheet = self.get_style_sheet()
stype = style_sheet.get_draw_style(style)
pname = stype.get_paragraph_style()
p = style_sheet.get_paragraph_style(pname)
font = p.get_font()
size = font.get_size()
height = size * (len(text))
width = 0
for line in text:
width = max(width, string_width(font, line))
wcm = ReportUtils.pt2cm(width)
hcm = ReportUtils.pt2cm(height)
rangle = radians(angle)
xloc = x - (wcm / 2.0) * cos(rangle) + (hcm / 2.0) * sin(rangle)
yloc = y - (hcm / 2.0) * cos(rangle) - (wcm / 2.0) * sin(rangle)
self._write_mark(mark, text)
self.cntnt.write(
'<draw:frame text:anchor-type="paragraph" ' +
'draw:z-index="2" ' +
'draw:style-name="clear" ' +
'svg:height="%.2fcm" ' % hcm +
'svg:width="%.2fcm" ' % wcm +
'draw:transform="' +
'rotate (%.8f) ' % -rangle +
'translate (%.3fcm %.3fcm)">\n' % (xloc, yloc) +
'<draw:text-box>\n' +
'<text:p text:style-name="X%s">' % pname +
'<text:span text:style-name="F%s">' % pname +
escape('\n'.join(text), _esc_map) +
'</text:span></text:p>\n</draw:text-box>\n' +
'</draw:frame>\n')
def draw_path(self, style, path):
"""
Draw a path
"""
minx = 9e12
miny = 9e12
maxx = 0
maxy = 0
for point in path:
minx = min(point[0], minx)
miny = min(point[1], miny)
maxx = max(point[0], maxx)
maxy = max(point[1], maxy)
self.cntnt.write(
'<draw:polygon draw:style-name="%s" ' % style +
'draw:layer="layout" ' +
'draw:z-index="1" ' +
'svg:x="%2fcm" svg:y="%2fcm" '
% (float(minx), float(miny)) +
'svg:viewBox="0 0 %d %d" '
% (int((maxx - minx) * 1000), int((maxy - miny) * 1000)) +
'svg:width="%.4fcm" ' % (maxx - minx) +
'svg:height="%.4fcm" ' % (maxy - miny)
)
point = path[0]
x1 = int((point[0] - minx) * 1000)
y1 = int((point[1] - miny) * 1000)
self.cntnt.write('draw:points="%d, %d' % (x1, y1))
for point in path[1:]:
x1 = int((point[0] - minx) * 1000)
y1 = int((point[1] - miny) * 1000)
self.cntnt.write(' %d, %d' % (x1, y1))
self.cntnt.write('"/>\n')
def draw_line(self, style, x1, y1, x2, y2):
"""
Draw a line
"""
self.cntnt.write(
'<draw:line text:anchor-type="paragraph" ' +
'draw:z-index="3" ' +
'draw:style-name="%s" ' % style +
'svg:x1="%.2fcm" ' % x1 +
'svg:y1="%.2fcm" ' % y1 +
'svg:x2="%.2fcm" ' % x2 +
'svg:y2="%.2fcm">' % y2 +
'<text:p/>\n' +
'</draw:line>\n'
)
def draw_text(self, style, text, x, y, mark=None):
"""
Draw a text
@param mark: IndexMark to use for indexing
"""
style_sheet = self.get_style_sheet()
box_style = style_sheet.get_draw_style(style)
para_name = box_style.get_paragraph_style()
pstyle = style_sheet.get_paragraph_style(para_name)
font = pstyle.get_font()
sw = ReportUtils.pt2cm(string_width(font, text))*1.3
self._write_mark(mark, text)
self.cntnt.write(
'<draw:frame text:anchor-type="paragraph" ' +
'draw:z-index="2" ' +
'draw:style-name="%s" ' % style +
'svg:width="%.2fcm" ' % sw +
'svg:height="%.2fcm" '
% (ReportUtils.pt2cm(font.get_size() * 1.4)) +
'svg:x="%.2fcm" ' % float(x) +
'svg:y="%.2fcm">' % float(y) +
'<draw:text-box> ' +
'<text:p text:style-name="F%s">' % para_name +
'<text:span text:style-name="F%s">' % para_name +
#' fo:max-height="%.2f">' % font.get_size() +
escape(text, _esc_map) +
'</text:span>' +
'</text:p>' +
'</draw:text-box>\n' +
'</draw:frame>\n'
)
def draw_box(self, style, text, x, y, w, h, mark=None):
"""
Draw a box
@param mark: IndexMark to use for indexing
"""
style_sheet = self.get_style_sheet()
box_style = style_sheet.get_draw_style(style)
para_name = box_style.get_paragraph_style()
shadow_width = box_style.get_shadow_space()
self._write_mark(mark, text)
if box_style.get_shadow():
self.cntnt.write(
'<draw:rect text:anchor-type="paragraph" ' +
'draw:style-name="%s_shadow" ' % style +
'draw:z-index="0" ' +
'draw:text-style-name="%s" ' % para_name +
'svg:width="%.2fcm" ' % w +
'svg:height="%.2fcm" ' % h +
'svg:x="%.2fcm" ' % (float(x) + shadow_width) +
'svg:y="%.2fcm">\n' % (float(y) + shadow_width) +
'</draw:rect>\n'
)
self.cntnt.write(
'<draw:rect text:anchor-type="paragraph" ' +
'draw:style-name="%s" ' % style +
'draw:text-style-name="%s" ' % para_name +
'draw:z-index="1" ' +
'svg:width="%.2fcm" ' % w +
'svg:height="%.2fcm" ' % h +
'svg:x="%.2fcm" ' % float(x) +
'svg:y="%.2fcm">\n' % float(y)
)
if text:
self.cntnt.write(
'<text:p text:style-name="%s">' % para_name +
'<text:span text:style-name="F%s">' % para_name +
escape(text, _esc_map) +
'</text:span>'
'</text:p>\n'
)
self.cntnt.write('</draw:rect>\n')
def center_text(self, style, text, x, y, mark=None):
"""
Center a text in a cell, a row, a line, ...
@param mark: IndexMark to use for indexing
"""
style_sheet = self.get_style_sheet()
box_style = style_sheet.get_draw_style(style)
para_name = box_style.get_paragraph_style()
pstyle = style_sheet.get_paragraph_style(para_name)
font = pstyle.get_font()
size = (string_width(font, text) / 72.0) * 2.54
self._write_mark(mark, text)
self.cntnt.write(
'<draw:frame text:anchor-type="paragraph" ' +
'draw:style-name="%s" ' % style +
'draw:z-index="2" ' +
'svg:width="%.2fcm" ' % size +
'svg:height="%.2fpt" ' % font.get_size() +
'svg:x="%.2fcm" ' % (x - (size / 2.0)) +
'svg:y="%.2fcm">\n' % float(y)
)
if text:
self.cntnt.write(
'<draw:text-box>' +
'<text:p text:style-name="X%s">' % para_name +
'<text:span text:style-name="F%s">' % para_name +
escape(text, _esc_map) +
'</text:span>\n' +
'</text:p>\n' +
'</draw:text-box>'
)
self.cntnt.write('</draw:frame>\n')
def process_spaces(line, format):
"""
    Process spaces in text lines for flowed and pre-formatted notes.
    line : text to process
    format : 0 = Flowed, 1 = Preformatted
    If the text is flowed (format==0), leading spaces (after ignoring XML)
    are removed; embedded multiple spaces are reduced to one by ODF itself.
    If the text is pre-formatted (format==1), all spaces (after ignoring XML)
    are replaced by "<text:s/>".
    Returns the processed text and the number of significant
    (i.e. non-white-space) characters.
"""
txt = ""
xml = False
sigcount = 0
# we loop through every character, which is very inefficient, but an attempt to use
# a regex replace didn't always work. This was the code that was replaced.
# Problem, we may not replace ' ' in xml tags, so we use a regex
# self.cntnt.write(re.sub(' (?=([^(<|>)]*<[^>]*>)*[^>]*$)',
# "<text:s/>", line))
for char in line:
if char == '<' and xml == False:
xml = True
txt += char
elif char == '>' and xml == True:
xml = False
txt += char
elif xml == True:
txt += char
elif char == " " or char == "\t":
if format == 0 and sigcount == 0:
pass
elif format == 1:
#preformatted, section White-space characters of
# http://docs.oasis-open.org/office/v1.1/OS/OpenDocument-v1.1-html/OpenDocument-v1.1.html#5.1.1.White-space%20Characters|outline
txt += "<text:s/>"
else:
txt += char
else:
sigcount += 1
txt += char
return [txt, sigcount]
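# --- Hedged demonstration (added; not part of the original module) ---
# process_spaces() is a module-level helper, so it can be exercised on its own.
# The strings below are made-up examples, not data produced by Gramps.
if __name__ == "__main__":
    # Flowed text (format == 0): leading spaces are dropped, the rest kept.
    print(process_spaces("   two words", 0))    # -> ['two words', 8]
    # Preformatted text (format == 1): every space becomes an ODF <text:s/>.
    print(process_spaces("  a <b>x</b>", 1))    # -> ['<text:s/><text:s/>a<text:s/><b>x</b>', 2]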
|
arunkgupta/gramps
|
gramps/plugins/docgen/odfdoc.py
|
Python
|
gpl-2.0
| 73,332
|
[
"Brian"
] |
f70e32ba900ef25c7c9af132ab8fd3b148e88777439a9f6ae76fc7c31e74391a
|
from disco.core import Job, result_iterator
def read_coverage_map(rec, params):
ref, read = rec
yield '%s:%d' % (ref, read.pos), read.qlen
def chr_partition(key, nrp, params):
chr = key.split(':')[0]
if chr == 'X': return 24
elif chr == 'Y': return 25
elif chr == 'MT': return 0
else:
try:
return int(chr)
except ValueError:
pass
#print chr, key
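# Hedged illustration (added): chr_partition() maps a "chrom:pos" key onto one
# of the 26 reduce partitions, e.g.
#   chr_partition('11:500123', 26, None) -> 11
#   chr_partition('X:1000', 26, None)    -> 24
#   chr_partition('MT:42', 26, None)     -> 0
# Keys whose chromosome name is not an integer, 'X', 'Y' or 'MT' fall through
# and return None.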
def coverage_reduce(reduce_iter, params):
import numpy
chrs = { # Chromosome sizes
'1':250000000,
'2':250000000,
'3':200000000,
'4':200000000,
'5':200000000,
'6':200000000,
'7':160000000,
'8':150000000,
'9':150000000,
'10':150000000,
'11':150000000,
'12':150000000,
'13':150000000,
'14':150000000,
'15':150000000,
'16':100000000,
'17':100000000,
'18':100000000,
'19':100000000,
'20':100000000,
'21':100000000,
'22':100000000,
'X':200000000,
'MT':250000000,
'Y':100000000 }
    # the first record is used only to determine the chromosome and to
    # allocate the coverage array
    p, l = iter(reduce_iter).next()
    chr, pos = p.split(':')
    c = numpy.zeros(chrs[chr])
for p, l in reduce_iter:
chr, pos = p.split(':')
pos = int(pos); l = int(l)
c[pos:pos+l] += 1
yield (chr, ' '.join((str(int(i)) for i in c)))
def sam_url_reader(stream, size, url, params):
import tempfile
import pysam
cache = tempfile.NamedTemporaryFile(dir='/mnt')
BLOCK_SIZE = 4*(1024**2)
block = stream.read(BLOCK_SIZE)
while block != "":
cache.write(block)
block = stream.read(BLOCK_SIZE)
sam = pysam.Samfile(cache.name)
for read in sam:
yield (sam.getrname(read.tid), read)
sam.close()
cache.close()
job = Job().run(
input = ['http://s3.amazonaws.com/1000genomes/data/HG00096/alignment/HG00096.chrom11.ILLUMINA.bwa.GBR.low_coverage.20111114.bam',],
#input = ['http://s3.amazonaws.com/1000genomes/data/HG00096/alignment/HG00096.mapped.ILLUMINA.bwa.GBR.low_coverage.20111114.bam'],
map_reader = sam_url_reader,
partition = chr_partition,
partitions = 26,
map=read_coverage_map,
reduce=coverage_reduce)
filePath = '/mnt/'
for chr, coverage in result_iterator(job.wait(show=True)):
    out = open(filePath+chr+'_coverage-HG00096.out', 'w')
    out.write('%s %s\n' % (chr, coverage))
    out.close()
import os
from data_binner import makePlot
fileHandleList = (fname for fname in os.listdir('/mnt') if fname.endswith('.out'))
map(makePlot,fileHandleList)
|
ContinuumIO/Examples
|
1000Genomes/MapReduce_1000Genomes.py
|
Python
|
mit
| 2,652
|
[
"BWA",
"pysam"
] |
12b1035c8a73bad4ff8aa97a6ca40cbb4e2739b8c4d165f3051ce70307d2bd9f
|
from __future__ import division, unicode_literals, absolute_import
import os, tempfile, copy, math, itertools, sys, traceback, datetime
import numpy as np
from operator import itemgetter
from itertools import product
try:
    import scipy
    import scipy.ndimage
except ImportError:
    print('functions.py: no scipy, smoother() will not work')
from siman import header
from siman.header import print_and_log, printlog, runBash, eV_A_to_J_m
from siman.small_functions import is_list_like, is_string_like, gunzip_file, makedir, grep_file, setting_sshpass
def unique_elements(seq, idfun=None):
# return only unique_elements order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def smoother(x, n, mul = 1, align = 1):
"""
mul - additionally multiplies values
#align - find first non-zero point and return it to zero
#n - smooth value,
if algo = 'gaus' than it is sigma
use something like 0.8
if algo = 'my'
n of 10-15 is good
"""
algo = 'gaus'
# algo = 'my'
if algo == 'my':
x_smooth = []
L = len(x)
store = np.zeros((n,1),float)
for u in range(L-n):
for v in range(n):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
for u in range(L-n,L):
for v in range(L-u-1):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
elif algo == 'gaus':
x_smooth =x
# x_smooth = scipy.ndimage.filters.median_filter(x,size =4)
# print('sigma is ', n)
x_smooth = scipy.ndimage.filters.gaussian_filter1d(x_smooth, n, order =0)
# x_smooth = scipy.ndimage.interpolation.spline_filter1d(x, 4)
else:
x_smooth = x
if align:
# print(x_smooth[0])
x_smooth[0] = 0
# sys.exit()
return np.asarray(x_smooth)
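# Hedged example (added): smoother() only needs numpy/scipy, so it can be
# tried directly; the signal below is made up.
#   y = smoother(np.sin(np.linspace(0, 3, 50)), 0.8)
#   y[0] == 0            # align=1 pins the first point to zero
#   len(y) == 50         # the gaussian filter preserves the length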
def run_on_server(command, addr = None):
printlog('Running', command, 'on server ...')
command = command.replace('\\', '/') # make sure is POSIX
# sys.exit()
# print(header.sshpass)
# sys.exit()
if addr is None:
addr = header.cluster_address
if header.ssh_object:
# printlog('Using paramiko ...', imp = 'y')
# if 'ne' in header.warnings:
# sys.exit()
out = header.ssh_object.run(command, noerror = True, printout = 'ne' in header.warnings)
elif header.sshpass and header.sshpass == 'proxy':
com = 'ssh -tt sdv sshpass -f '+ header.path2pass +' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
out = out.split('Connection to')[0] # remove last message Connection to ipaddress closed
# sys.exit()
elif header.sshpass:
com = 'sshpass -f '+header.path2pass+' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# sys.exit()
else:
bash_comm = 'ssh '+addr+' "'+command+'"'
# print(bash_comm)
# sys.exit()
out = runBash(bash_comm)
out = out.split('#')[-1].strip()
printlog(out)
# print(out)
# sys.exit()
return out
def push_to_server(files = None, to = None, addr = None):
"""
if header.ssh_object then use paramiko
to (str) - path to remote folder !
"""
if not is_list_like(files):
files = [files]
to = to.replace('\\', '/') # make sure is POSIX
files_str = ' '.join(np.array(files ))
command = ' mkdir -p {:}'.format( to )
# print('asfsadfdsf', to)
printlog('push_to_server():', command, run_on_server(command, addr))
# sys.exit()
printlog('push_to_server(): uploading files ', files, 'to', addr, to)
if header.ssh_object:
for file in files:
# print(file, to)
header.ssh_object.put(file, to+'/'+os.path.basename(file) )
out = ''
elif header.sshpass and header.sshpass == 'proxy':
com = 'tar cf - '+ files_str + ' | ssh sdv "sshpass -f ~/.ssh/p ssh '+addr+' \\"cd '+header.cluster_home+' && tar xvf -\\"" '
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
# sys.exit()
elif header.sshpass:
# if '@' not in addr:
# printlog('Error! Please provide address in the form user@address')
# l = addr.split('@')
# print(l)
# user = l[0]
# ad = l[1]
# com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
# print(com)
# sys.exit()
out = runBash(com)
else:
out = runBash('rsync -uaz '+files_str+ ' '+addr+':'+to)
printlog(out)
return out
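# Hedged usage sketch (added): a typical call would look like
#   push_to_server(['INCAR', 'POSCAR'], to='calc/run1', addr='user@cluster')
# The file names, remote folder and address are illustrative only; which
# transport is used (paramiko, sshpass or plain rsync) depends on the header
# settings exactly as in the branches above.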
def file_exists_on_server(file, addr):
file = file.replace('\\', '/') # make sure is POSIX
printlog('Checking existence of file', file, 'on server', addr )
exist = run_on_server(' ls '+file, addr)
# if header.ssh_object:
# exist = header.ssh_object.fexists(file)
# else:
# exist = runBash('ssh '+addr+' ls '+file)
if 'No such file' in exist:
exist = ''
else:
exist = 'file exists'
if exist:
res = True
else:
res = False
printlog('File exist? ', res)
return res
def get_from_server(files = None, to = None, to_file = None, addr = None, trygz = True):
"""
Download files using either paramiko (higher priority) or rsync;
For paramiko header.ssh_object should be defined
files (list of str) - files on cluster to download
to (str) - path to local folder !
to_file (str) - path to local file (if name should be changed); in this case len(files) should be 1
The gz file is also checked
RETURN
result of download
TODO:
    currently a new connection is opened for each file;
    copy them all over one connection instead
"""
# print(addr)
# sys.exit()
def download(file, to_file):
# print(header.sshpass)
if header.ssh_object:
exist = file_exists_on_server(file, addr)
# try:
if exist:
printlog('Using paramiko: ssh_object.get(): from to ', file, to_file)
header.ssh_object.get(file, to_file )
out = ''
# except FileNotFoundError:
else:
out = 'error, file not found'
elif header.sshpass and header.sshpass == 'proxy':
# com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar zcf - '+ file +'\\"" | tar zxf - '+to_file # does not work?
com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar cf - '+ file +'\\"" > '+to_file
# print('sshpass',com)
# sys.exit()
out = runBash(com)
elif header.sshpass:
#com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
out = runBash(com)
# print(addr)
# sys.exit()
else:
# print(addr,file,to_file)
out = runBash('rsync -uaz '+addr+':'+file+ ' '+to_file)
if 'error' in out:
res = out
else:
res = 'OK'
out = ''
printlog('Download result is ', res)
return out
if '*' in files:
printlog('get_from_server(): get by template')
files = run_on_server('ls '+files, addr).splitlines()
# print(files)
# sys.exit()
printlog('get_from_server(): I download', files)
elif not is_list_like(files):
files = [files]
files = [file.replace('\\', '/') for file in files] #make sure the path is POSIX
files_str = ', '.join(np.array(files ))
printlog('Trying to download', files_str, 'from server', imp = 'n')
for file in files:
if not to and not to_file: #use temporary file
with tempfile.NamedTemporaryFile() as f:
to_file_l = f.name #system independent filename
elif not to_file: #obtain filename
to_file_l = os.path.join(to, os.path.basename(file) )
else:
to_file_l = to_file
makedir(to_file_l)
out = download(file, to_file_l)
if out and trygz:
printlog('File', file, 'does not exist, trying gz', imp = 'n')
# run_on_server
files = run_on_server(' ls '+file+'*', addr)
file = files.split()[-1]
# print(file)
nz = file.count('gz')
ext = '.gz'*nz
# file+='.gz'
to_file_l+=ext
if file:
out = download(file, to_file_l)
printlog(' gz found with multiplicity', ext, imp = 'n')
for i in range(nz):
printlog('unzipping', to_file_l)
gunzip_file(to_file_l)
to_file_l = to_file_l[:-3]
else:
printlog(' No gz either!', imp = 'n')
# if '5247' in file:
# sys.exit()
return out
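# Hedged usage sketch (added): downloading an OUTCAR into a local folder,
# falling back to OUTCAR.gz if only the compressed file exists on the cluster.
#   get_from_server('calc/run1/OUTCAR', to='results/', addr='user@cluster')
# All paths and the address are illustrative assumptions.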
def salary_inflation():
"""Calculate salary growth in Russia taking into account inflation"""
inflation2000_2014 = [
5.34,
6.45,
6.58,
6.10,
8.78,
8.80,
13.28,
11.87,
9.00 ,
10.91,
11.74,
11.99,
15.06,
18.8,
20.1]
init_salary = 1500 # in jan 2000; other sources 2000 - very important
for i, l in enumerate( reversed(inflation2000_2014) ):
init_salary = (1+l/100)*init_salary
print( init_salary, i+2000)
salary2014 = 30000
increase = salary2014/init_salary
print( increase)
# salary_inflation()
def element_name_inv(el):
el_dict = header.el_dict
nu_dict = header.nu_dict
# print type(el), el, type(str('sdf') )
if is_string_like(el):
try:
elinv = el_dict[el]
        except KeyError:
print_and_log("Error! Unknown element: " +str(el))
raise RuntimeError
else:
el = int(el)
try:
elinv = nu_dict[el]
        except KeyError:
print_and_log("Error! Unknown element: "+str(el))
raise RuntimeError
return elinv # inversed notion of element
invert = element_name_inv
def return_atoms_to_cell(st):
st = st.return_atoms_to_cell()
return st
def calc_ac(a1, c1, a2, c2, a_b = 0.1, c_b = 0.1, type = "two_atoms"):
"""
Calculate values of hexagonal lattice parameters for cell with two different atoms.
    The assumptions used are:
    1. The provided lattice constants are for cells large enough that the excess volume (dV) of the impurity does not depend on the size of the cell.
    2. The two atoms do not interact with each other, which allows one to use dV(CO) = dV(C) + dV(O)
Two regimes:
two_atoms - calculate cell sizes if additional atom was added
double_cell - if cell was doubled; only first cell and second_cell are needed
Input:
a1, c1 - lattice constants of cell with first impurity atom (first cell)
a2, c2 - lattice constants of cell with second impurity atom (second cell)
    a_b, c_b - lattice constants of cell with pure hexagonal metal
Output:
a, c - lattice constants of cell with two atoms
"""
hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
if hstring != header.history[-1]: header.history.append( hstring )
A = (a1**2 * c1) + (a2**2 * c2) - (a_b**2 * c_b)
B = 0.5 * (c1/a1 + c2/a2)
C = ( (a1**2 * c1) + (a2**2 * c2) ) * 0.5 #sum of cell volumes divided by 2 since during the construction of new cell we will use multiplication by 2
# print "A,B=",A,B
a = (A/B)**(1./3)
c = a * B
a = round(a,5)
c = round(c,5)
print_and_log( "a, c, c/a for cell with pure hcp ", a_b, c_b, round(c_b/a_b,4), imp ='y' )
print_and_log( "a, c, c/a for cell with first atom ", a1, c1, round(c1/a1,4), imp ='y' )
print_and_log( "a, c, c/a for cell with second atom ", a2, c2, round(c2/a2,4), imp ='y' )
#for double cell
a3 = (C/B)**(1./3)
c3 = a3 * B
a3 = round(a3,5)
c3 = round(c3,5)
if type == "two_atoms":
print_and_log( "a, c, c/a for cell with two atoms ", a, c, round(c/a,4), "# the same cell but with two atoms\n", imp ='y')
elif type == "double_cell":
print_and_log( "a, c, c/a for new cell ", a3, c3, round(c3/a3,4), "# for cell with V = V(first_cell) + V(second cell), but only for the case if V(second cell) == V(first_cell)", imp ='y')
return a, c
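# Hedged worked example (added): the lattice constants below are invented
# numbers for a Ti-like hcp cell, shown only to illustrate the call signature;
# they are not results.
#   a, c = calc_ac(2.954, 4.696, 2.956, 4.701, a_b=2.951, c_b=4.684)
# With these inputs the function prints a, c and c/a of the reference and
# single-impurity cells and returns the constants estimated for the cell
# containing both impurity atoms.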
def read_charge_den_vasp():
"""
Read CHG vasp file and return ChargeDen object
"""
class ChargeDen():
"""docstring for ChargeDen"""
def __init__(self, ):
# self.arg = arg
pass
def rotation_matrix(axis,theta):
axis = axis/math.sqrt(np.dot(axis,axis))
a = math.cos(theta/2)
b,c,d = -axis*math.sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def rotate():
v = np.array([3,5,0])
axis = np.array([4,4,1])
theta = 1.2
print(np.dot(rotation_matrix(axis,theta),v))
# [ 2.74911638 4.77180932 1.91629719]
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
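# Hedged check (added): aligning the x-axis onto the y-axis.
#   R = rotation_matrix_from_vectors(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#   np.allclose(R.dot([1., 0., 0.]), [0., 1., 0.])   # -> True
# Note the formula divides by s**2, so it breaks down for (anti)parallel vectors.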
def plot_charge_den():
"""Test function; Was not used"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
# print X
# print Y
# print Z
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
# cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlabel('X')
ax.set_xlim(-40, 40)
ax.set_ylabel('Y')
ax.set_ylim(-40, 40)
ax.set_zlabel('Z')
ax.set_zlim(-100, 100)
plt.show()
return
def plot_interaction(calclist, calc):
"""
    Calculate the interaction parameter alpha from segregation energies.
    Keep in mind that this parameter is obtained under the regular solution
    approximation, i.e. e_seg = E_seg0 - 2*alpha*(Xgb - X), so the linear fit
    below returns E_seg0 as the constant term and alpha as minus half the slope.
"""
e_seg = []
dX = []
for id in calclist:
Xgb = calc[id].Xgb
X = calc[id].X
dX.append(Xgb/1 - X)
e_seg.append(calc[id].e_seg)
# print calc[id].e_seg
# print calc[id].X
#print dX
coeffs1 = np.polyfit(dX, e_seg, 1)
fit_func1 = np.poly1d(coeffs1)
print( "list of seg energies: ", e_seg )
print( "list of dX : ", dX )
print( "Fitting using linear function:" )
print( fit_func1 )
print( "E_seg0 = {0:0.0f} meV, standart enthalpy of segregation".format(fit_func1[0]) )
print( "alpha = {0:0.0f} meV, interaction coefficient".format(-fit_func1[1]/2) )
return
def calculate_voronoi(self, state = 'end'):
# By default two quantities per atom are calculated by this compute.
# The first is the volume of the Voronoi cell around each atom.
# Any point in an atom's Voronoi cell is closer to that atom than any other.
# The second is the number of faces of the Voronoi cell, which
# is also the number of nearest neighbors of the atom in the middle of the cell.
# state - init or end; if init then saved in self.init.vorovol; if end than saved in self.vorovol
write_lammps(self, state, filepath = 'voronoi_analysis/structure.lammps') #write structure for lammps
runBash("rm voronoi_analysis/dump.voro; /home/aksenov/installed/lammps-1Feb14/src/lmp_serial < voronoi_analysis/voronoi.in > voronoi_analysis/log")
if state == 'end':
self.vorovol = []
self.vorofaces = []
vorovol = self.vorovol
vorofaces = self.vorofaces
elif state == 'init':
self.init.vorovol = []
self.init.vorofaces = []
vorovol = self.init.vorovol
vorofaces = self.init.vorofaces
vsum=0
wlist = []
with open('voronoi_analysis/dump.voro','r') as volfile: #analyze dump.voro
for line in volfile:
if 'ITEM: ATOMS ' in line:
break
for line in volfile:
ll = line.split()
if int(ll[1]) > 1:
wlist.append( [ll[0], ll[5], ll[6], ll[2]] )
# print 'Volume of atom ',ll[0],'is', ll[5]
vsum= vsum+float(ll[5])
print_and_log( 'Check total volume ', vsum, self.end.vol)
wlist.sort(key = itemgetter(0)) #sort according to the position of atoms
print_and_log( "atom #, voronoi vol, voronoi faces, x coordinate: ", )
print_and_log( wlist)
for w in wlist:
vorovol.append(float(w[1]))
vorofaces.append(int(w[2]))
# print 'Voro vol ',self.end.vorovol
# print 'Voro faces',self.end.vorofaces
# print len(wlist)
if hasattr(self, 'vorovol'):
voro = ''
if len(vorovol) == 2: #C and O
voro = " {0:5.2f} & {1:2d} & {2:5.2f} & {3:2d} ".format(vorovol[0], vorofaces[0], vorovol[1], vorofaces[1] ).center(25)
else:
voro = " {0:5.2f} & {1:2d} ".format(vorovol[0], vorofaces[0] ).center(25)
voro+='&'
else:
voro = ""
print_and_log( "Voronoi volume = ", voro, imp = 'y')
return voro
def log_history(hstring):
try:
if hstring != header.history[-1]: header.history.append( hstring )
    except IndexError:
header.history.append( hstring )
return
def gb_energy_volume(gb,bulk):
if (gb.end.rprimd[1] != bulk.end.rprimd[1]).any() or (gb.end.rprimd[2] != bulk.end.rprimd[2]).any():
print_and_log("Warning! You are trying to calculate gb_energy from cells with different lateral sizes:"+str(gb.end.rprimd)+" "+str(bulk.end.rprimd)+"\n")
#print bulk.vol
V_1at = bulk.vol / bulk.natom #* to_ang**3
E_1at = bulk.energy_sigma0 / bulk.natom
A = np.linalg.norm( np.cross(gb.end.rprimd[1], gb.end.rprimd[2]) ) #surface area of gb
#print A
gb.v_gb = ( gb.vol - V_1at * gb.natom) / A / 2. * 1000
gb.e_gb = ( gb.energy_sigma0 - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.e_gb_init = ( gb.list_e_sigma0[0] - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.bulk_extpress = bulk.extpress
#print "Calc %s; e_gb_init = %.3f J/m^2; e_gb = %.3f J/m; v_gb = %.3f angstrom "%(gb.name, gb.e_gb_init, gb.e_gb, gb.v_gb )
outst = "%15s&%7.0f&%7.0f"%(gb.name, gb.e_gb, gb.v_gb)
return outst
def headers():
j = (7,12,14,7,8,9,9,5,5,20,5,20,8,12,20,8,5,8,8)
d="&"
header_for_bands= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"nband"+d+"Added, \%"+"\\\\"
header_for_ecut= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Ecut,eV"+"\\\\"
header_for_npar= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"NPAR".center(j[16])+d+"LPLANE".center(j[17])+"\\\\"
header_for_kpoints= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"k-spacings".center(j[9])+d+"nkpt".center(j[10])+"\\\\"
header_for_tsmear= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"tsmear, meV".center(j[13])+d+"Smearing error, meV/atom".center(j[14])+"\\\\"
header_for_stress= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Stress, intr u.*1000".center(j[11])+d+"Pressure, MPa".center(j[12])
#print "\\hline"
return header_for_kpoints
def read_vectors(token, number_of_vectors, list_of_words, type_func = None, lists = False):
"""Returns the list of numpy vectors for the last match"""
# lists - return list of lists instead list of vectors
if type_func is None:
type_func = lambda a : float(a)
number_of_matches = list_of_words.count( token )
if number_of_matches == 0:
#print_and_log("Warning token '"+token+"' was not found! return empty\n")
return [None]
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
#print list_of_words[index]
list_of_vectors = []
list_of_lists = []
vector = np.zeros((3))
for i in range(number_of_vectors):
vector[0] = type_func(list_of_words[index + 1])
vector[1] = type_func(list_of_words[index + 2])
vector[2] = type_func(list_of_words[index + 3])
list3 = []
for j in 1,2,3:
list3.append(type_func(list_of_words[index + j]) )
index+=3
list_of_vectors.append(vector.copy())
list_of_lists.append(list3)
if lists:
out = list_of_lists
else:
out = list_of_vectors
return out
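# Hedged example (added): reading one 3-vector that follows a token in a list
# of words (the token name 'rprimd' is just an illustration).
#   words = 'rprimd 1.0 0.0 0.0'.split()
#   read_vectors('rprimd', 1, words)              # -> [array([1., 0., 0.])]
#   read_vectors('rprimd', 1, words, lists=True)  # -> [[1.0, 0.0, 0.0]]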
def read_string(token, length, string):
    """Return *length* characters following 'token ' in *string*, or '' if the token is absent"""
    sh = len(token)+1
    i = string.find(token)
    # print('length', i, i+length)
    # sys.exit()
    if i == -1:
        return ''
    else:
        return string[i+sh:i+sh+length]
def read_list(token, number_of_elements, ttype, list_of_words):
"""Input is token to find, number of elements to read, type of elements and list of words,
where to search
Returns the list of elements for the last match"""
number_of_matches = list_of_words.count( token )
#if number_of_elements == 0: raise RuntimeError
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
if number_of_matches == 0 or number_of_elements == 0:
#print_and_log("Warning token '"+token+"' was not found or asked number of elements is zero! set to [None]\n")
#if ttype == str:
# return ['']*number_of_elements
#else:
# return [0]*number_of_elements
return [None]
try:
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
except ValueError:
print_and_log("Warning!, token "+token+" was not found. I return [None]!\n")
return [None]
index+=1 #the position of token value
list_of_elements = []
#define function dependig on type:
# print('string 839 functions.py Blind Guardian! token', token)
if ttype == int :
def convert(a):
return int(a)
elif ttype == float:
def convert(a):
# print a
return float(a)
elif ttype == str :
def convert(a):
return str(a)
#print list_of_words[index], type(list_of_words[index])
if list_of_words[index] == "None" :
def convert(a):
return [None]
#Make convertion
for i in range(number_of_elements):
if 'None' in list_of_words[index]:
list_of_elements.append(None)
else:
list_of_elements.append( convert( list_of_words[index] ) )
index+=1
return list_of_elements
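# Hedged example (added, mirrors read_vectors above; 'NELECT' is only used as
# an illustrative token name):
#   read_list('NELECT', 1, float, 'NELECT 100.0'.split())   # -> [100.0]
#   read_list('NELECT', 1, float, [])                        # -> [None]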
def words(fileobj):
"""Generator of words. However does not allow to use methods of list for returned"""
for line in fileobj:
for word in line.split():
yield word
def server_cp(copy_file, to, gz = True, scratch = False, new_filename = None):
if scratch:
if not header.PATH2ARCHIVE:
printlog('Warning! PATH2ARCHIVE is empty! Please put path archive in ~/simanrc.py or ./project_conf.py ')
copy_file = header.PATH2ARCHIVE + '/' + copy_file
else:
copy_file = header.project_path_cluster + '/' + copy_file
filename = os.path.basename(copy_file)
if new_filename is None:
new_filename = filename
if gz:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename + '.gz ; gunzip -f '+ to+ '/'+new_filename+'.gz'
else:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename
printlog('Running on server', command, imp = '')
if file_exists_on_server(copy_file, header.cluster_address):
out = run_on_server(command, addr = header.cluster_address)
printlog('Output of run_on_server', out, imp = '')
else:
out = 'error, file does not exist on server: '+copy_file
return out
def wrapper_cp_on_server(file, to, new_filename = None):
"""
tries iterativly scratch and gz
"""
copy_to = to
copy_file = file
filename = os.path.basename(file)
if new_filename:
app = 'with new name '+new_filename
else:
app = ''
for s, gz in product([0,1], ['', '.gz']):
printlog('scratch, gz:', s, gz)
out = server_cp(copy_file+gz, to = to, gz = gz, scratch = s, new_filename = new_filename)
if out == '':
            printlog('File', filename, 'was successfully copied to',to, app, imp = 'y')
break
# else:
else:
printlog('Warning! File was not copied, probably it does not exist. Try using header.warnings = "neyY" for more details', imp = 'y')
return
def update_incar(parameter = None, value = None, u_ramp_step = None, write = True, f = None, run = False, st = None):
"""Modifications of INCAR. Take attention that *parameter* will be changed to new *value*
if it only already exist in INCAR. *u_ramp_step*-current step to determine u,
*write*-sometimes just the return value is needed.
Returns U value corresponding to *u_ramp_step*.
"""
self = st
u_step = None
if parameter == 'LDAUU':
#Update only non-zero elements of LDAUU with value
set_LDAUU_list = self.set.vasp_params['LDAUU']
new_LDAUU_list = copy.deepcopy(set_LDAUU_list)
# print set_LDAUU_list
u_step = 0.0
for i, u in enumerate(set_LDAUU_list):
if u == 0:
continue
u_step = np.linspace(0, u, self.set.u_ramping_nstep)[u_ramp_step]
u_step = np.round(u_step, 1)
# new_LDAUU_list[i] = value
new_LDAUU_list[i] = u_step
new_LDAUU = 'LDAUU = '+' '.join(['{:}']*len(new_LDAUU_list)).format(*new_LDAUU_list)
command = "sed -i.bak '/LDAUU/c\\" + new_LDAUU + "' INCAR\n"
#print('u_step',u_step)
#sys.exit()
elif parameter == 'MAGMOM':
new_incar_string = parameter + ' = ' + ' '.join(['{:}']*len(value)).format(*value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
# elif parameter in ['IMAGES', 'ISPIN']:
else:
new_incar_string = parameter + ' = ' + str(value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
if write and f:
f.write(command)
if run:
runBash(command)
return u_step #for last element
def check_output(filename, check_string, load):
"""
Check if file exist and it is finished by search for check_string
"""
if filename and os.path.exists(filename):
out = grep_file(check_string, filename, reverse = True)
printlog('The grep result of',filename, 'is:', out)
# sys.exit()
if check_string in out or 'un' in load:
state = '4. Finished'
else:
state = '5. Broken outcar'
else:
state = '5. no OUTCAR'
return state
|
dimonaks/siman
|
siman/functions.py
|
Python
|
gpl-2.0
| 29,756
|
[
"LAMMPS",
"VASP"
] |
461a7e2bd315d5579b567472a8af4b1ef9247d525078e4b08e61d9e97d0028b1
|
"""
FROWNS LICENSE
Copyright (c) 2001-2003, Brian Kelley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Brian Kelley nor the names of frowns
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Build a simple Molecule object given the events from the Smiles
# tokenizer.
import string
from pinky.smiles import handler
import weakref
from ..mol import Atom, Bond, Molecule
# bondlookup is of the form
# textSymbol, bondsymbol, bondorder, bondtype, equiv class, stereo
STEREO_NONE = None
STEREO_UP = "UP"
STEREO_DOWN = "DOWN"
BONDLOOKUP = {'-': ('-', 1, 1, 1, STEREO_NONE),
'=': ('=', 2, 2, 2, STEREO_NONE),
'#': ('#', 3, 3, 3, STEREO_NONE),
'\\': ('\\',1, 1, 1, STEREO_DOWN),
'/': ('/' ,1, 1, 1, STEREO_UP),
':':(':', 1.5, 4, 4, STEREO_NONE),
}
def get_symbol_aromatic(text):
if text[0] in "cnosp":
return text.upper(), 1
return text, 0
def normalize_closure(text):
if text[:1] == "%":
return int(text[1:])
return int(text)
implicit_bond = -123
class DummyVFGraph:
def __init__(self):
self.atoms = -1
def InsertNode(self, node):
self.atoms += 1
return self.atoms
def InsertEdge(self, index1, index2, bond):
pass
class BuildMol(handler.TokenHandler):
def begin(self):
self.closures = {}
self.atoms = []
self.bonds = []
self._atom = None
self._prev_atoms = []
# None occurs after a '.'
# implicit_bond means implicit single bond
self._pending_bond = None
def end(self):
if len(self._prev_atoms) >= 2:
raise AssertionError("Missing ')'")
if self._pending_bond not in [implicit_bond, None]:
raise AssertionError("Missing an atom after the bond")
if self.closures:
raise AssertionError("Missing closures for %s" %
(self.closures.keys(),))
self.mol = Molecule(self.atoms, self.bonds)
def add_token(self, field, pos, text):
getattr(self, "do_" + field)(text)
def add_atom(self, atom):
atoms = self.atoms
atom.index = len(atoms)
atoms.append(atom)
if self._pending_bond == implicit_bond:
# Implicit single or aromatic bond
self._pending_bond = Bond()
if self._pending_bond is not None:
bond = self._pending_bond
prev_atom = self._prev_atoms[-1]
bond.atoms[:] = [prev_atom, atom]
##self.mol.add_bond(bond, prev_atom, atom)
bond.atoms = [prev_atom, atom]
atom.bonds.append(bond)
prev_atom.bonds.append(bond)
atom.oatoms.append(prev_atom)
prev_atom.oatoms.append(atom)
self.bonds.append(bond)
self._pending_bond = implicit_bond
if not self._prev_atoms:
self._prev_atoms.append(atom)
else:
self._prev_atoms[-1] = atom
#self.mol.atoms.append(atom)
def do_raw_atom(self, text):
atom = Atom()
symbol, atom.aromatic = get_symbol_aromatic(text)
atom.set_symbol(symbol)
self.add_atom(atom)
def do_open_bracket(self, text):
self._atom = Atom()
self._atom.has_explicit_hcount = True
def do_weight(self, text):
self._atom.weight = int(text)
def do_element(self, text):
symbol, self._atom.aromatic = get_symbol_aromatic(text)
self._atom.set_symbol(symbol)
def do_chiral_count(self, text):
#print "setting chirality", self._atom, int(text[1:])
self._atom.chirality = int(text[1:])
def do_chiral_named(self, text):
self._atom.chiral_class = text[1:3]
self._atom.chirality = int(text[3:])
def do_chiral_symbols(self, text):
self._atom.chiral_class = len(text)
def do_hcount(self, text):
if text == "H":
self._atom.explicit_hcount = 1
else:
self._atom.explicit_hcount = int(text[1:])
def do_positive_count(self, text):
self._atom.charge = int(text[1:])
def do_positive_symbols(self, text):
self._atom.charge = len(text)
def do_negative_count(self, text):
self._atom.charge = -int(text[1:])
def do_negative_symbols(self, text):
self._atom.charge = -len(text)
def do_close_bracket(self, text):
self.add_atom(self._atom)
self._atom = None
def do_bond(self, text):
assert self._pending_bond in (implicit_bond, None)
symbol, bondorder, bondtype, equiv_class, stereo = BONDLOOKUP[text]
        # if the bond came in as aromatic (which it CAN'T!)
        if bondtype == 4:
            assert 0, "Bonds shouldn't come in as ':'"
fixed = 0
else:
fixed = 1
bond = Bond(text, bondorder, bondtype, fixed, stereo)
bond.equiv_class = equiv_class
self._pending_bond = bond
def do_dot(self, text):
assert self._pending_bond in (implicit_bond, None)
self._pending_bond = None
def do_closure(self, text):
num = normalize_closure(text)
if num in self.closures:
prev_atom, bond = self.closures[num]
del self.closures[num]
assert self._pending_bond is not None, "Can't happen"
if self._pending_bond is not implicit_bond and \
bond is not implicit_bond and \
self._pending_bond.symbol != "-": # according to toolkit
# need to verify they are compatible
prev_symbol = bond.symbol
symbol = self._pending_bond.symbol
if (prev_symbol == symbol) or \
(prev_symbol == "/" and symbol == "\\") or \
(prev_symbol == "\\" and symbol == "/"):
pass
else:
raise AssertionError("bond types don't match")
elif bond is implicit_bond and self._pending_bond is not implicit_bond:
# see if one of the bonds is not implicit and keep it
bond = self._pending_bond
elif bond is implicit_bond:
# both are implicit so make a new one
bond = Bond()
bond._closure = 1
atom = self._prev_atoms[-1]
if prev_atom is atom:
raise AssertionError("cannot close a ring with itself")
bond.atoms[:] = [prev_atom, atom]
prev_atom._closure = 1
atom._closure = 1
##self.mol.add_bond(bond, prev_atom, atom)
bond.atoms = [prev_atom, atom]
atom.bonds.append(bond)
prev_atom.bonds.append(bond)
atom.oatoms.append(prev_atom)
prev_atom.oatoms.append(atom)
self.bonds.append(bond)
else:
self.closures[num] = (self._prev_atoms[-1], self._pending_bond)
self._pending_bond = implicit_bond
def do_open_branch(self, text):
self._prev_atoms.append(self._prev_atoms[-1])
def do_close_branch(self, text):
self._prev_atoms.pop()
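# Hedged usage sketch (added; the tokenizer entry point in pinky.smiles is an
# assumption and is not shown in this file):
#   h = BuildMol()
#   h.begin()
#   ...feed tokens from the Smiles tokenizer via h.add_token(field, pos, text)...
#   h.end()
#   mol = h.mol   # Molecule assembled from the collected atoms and bonds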
|
ubccr/pinky
|
pinky/smiles/builder.py
|
Python
|
bsd-2-clause
| 8,899
|
[
"Brian"
] |
1c5476cdb4978c71d6f7425277707818903ec04af009b5a38c8899d8a82c4575
|
#!/usr/local/bin/python
# Copyright (C) 2004 Rune Linding & Lars Juhl Jensen - EMBL
# The DisEMBL is licensed under the GPL license
# (http://www.opensource.org/licenses/gpl-license.php)
# DisEMBL pipeline
# Modified to work with current versions of Biopython (1.7+)
# by Shyam Saladi ([email protected]), Janauary 2013
# Bio:SeqIO completely replaces Bio:Fasta
from string import *
from sys import argv
from Bio import File
from Bio import SeqIO
import fpformat
import sys
import tempfile
import os
from os import system,popen3
# change these to the correct paths
NN_bin = '/PATH/DisEMBL-1.4/disembl'
SG_bin = '/PATH/DisEMBL-1.4/sav_gol'
def JensenNet(sequence):
outFile = tempfile.mktemp()
inFile= tempfile.mktemp()
open(inFile,'w').write(sequence+'\n')
system(NN_bin + '< ' + inFile +' > ' + outFile)
REM465 = []
COILS = []
HOTLOOPS = []
resultsFile = open(outFile,'r')
results = resultsFile.readlines()
resultsFile.close()
for result in results:
coil = float(fpformat.fix(split(result)[0],6))
COILS.append(coil)
hotloop = float(fpformat.fix(split(result)[1],6))
HOTLOOPS.append(hotloop)
rem465 = float(fpformat.fix(split(result)[2],6))
REM465.append(rem465)
os.remove(inFile)
os.remove(outFile)
return COILS, HOTLOOPS, REM465
def SavitzkyGolay(window,derivative,datalist):
if len(datalist) < 2*window:
window = len(datalist)/2
elif window == 0:
window = 1
stdin, stdout, stderr = popen3(SG_bin + ' -V0 -D' + str(derivative) + ' -n' + str(window)+','+str(window))
for data in datalist:
stdin.write(`data`+'\n')
try:
stdin.close()
except:
print stderr.readlines()
results = stdout.readlines()
stdout.close()
SG_results = []
for result in results:
f = float(fpformat.fix(result,6))
if f < 0:
SG_results.append(0)
else:
SG_results.append(f)
return SG_results
def getSlices(NNdata, fold, join_frame, peak_frame, expect_val):
slices = []
inSlice = 0
for i in range(len(NNdata)):
if inSlice:
if NNdata[i] < expect_val:
if maxSlice >= fold*expect_val:
slices.append([beginSlice, endSlice])
inSlice = 0
else:
endSlice += 1
if NNdata[i] > maxSlice:
maxSlice = NNdata[i]
elif NNdata[i] >= expect_val:
beginSlice = i
endSlice = i
inSlice = 1
maxSlice = NNdata[i]
if inSlice and maxSlice >= fold*expect_val:
slices.append([beginSlice, endSlice])
i = 0
while i < len(slices):
if i+1 < len(slices) and slices[i+1][0]-slices[i][1] <= join_frame:
slices[i] = [ slices[i][0], slices[i+1][1] ]
del slices[i+1]
elif slices[i][1]-slices[i][0]+1 < peak_frame:
del slices[i]
else:
i += 1
return slices
def reportSlicesTXT(slices, sequence):
if slices == []:
s = lower(sequence)
else:
if slices[0][0] > 0:
s = lower(sequence[0:slices[0][0]])
else:
s = ''
for i in range(len(slices)):
if i > 0:
sys.stdout.write(', ')
sys.stdout.write( str(slices[i][0]+1) + '-' + str(slices[i][1]+1) )
s = s + upper(sequence[slices[i][0]:(slices[i][1]+1)])
if i < len(slices)-1:
s = s + lower(sequence[(slices[i][1]+1):(slices[i+1][0])])
elif slices[i][1] < len(sequence)-1:
s = s + lower(sequence[(slices[i][1]+1):(len(sequence))])
print ''
print s
def runDisEMBLpipeline():
try:
smooth_frame = int(sys.argv[1])
peak_frame = int(sys.argv[2])
join_frame = int(sys.argv[3])
fold_coils = float(sys.argv[4])
fold_hotloops = float(sys.argv[5])
fold_rem465 = float(sys.argv[6])
file = str(sys.argv[7])
try:
mode = sys.argv[8]
except:
mode = 'default'
except:
print '\nDisEMBL.py smooth_frame peak_frame join_frame fold_coils fold_hotloops fold_rem465 sequence_file [mode]\n'
print 'A default run would be: ./DisEMBL.py 8 8 4 1.2 1.4 1.2 fasta_file'
print 'Mode: "default"(nothing) or "scores" which will give scores per residue in TAB seperated format'
raise SystemExit
db = open(file,'r')
print ' ____ _ _____ __ __ ____ _ _ _ _'
print '| _ \(_)___| ____| \/ | __ )| | / || || |'
print '| | | | / __| _| | |\/| | _ \| | | || || |_'
print '| |_| | \__ \ |___| | | | |_) | |___ | ||__ _|'
print '|____/|_|___/_____|_| |_|____/|_____| |_(_) |_|'
print '# Copyright (C) 2004 - Rune Linding & Lars Juhl Jensen '
print '# EMBL Biocomputing Unit - Heidelberg - Germany '
print '#'
for cur_record in SeqIO.parse(db, "fasta"):
        sequence = upper(str(cur_record.seq))
# Run NN
COILS_raw, HOTLOOPS_raw, REM465_raw = JensenNet(sequence)
# Run Savitzky-Golay
REM465_smooth = SavitzkyGolay(smooth_frame,0,REM465_raw)
COILS_smooth = SavitzkyGolay(smooth_frame,0,COILS_raw)
HOTLOOPS_smooth = SavitzkyGolay(smooth_frame,0,HOTLOOPS_raw)
if mode == 'default':
sys.stdout.write('> '+cur_record.id+'_COILS ')
reportSlicesTXT( getSlices(COILS_smooth, fold_coils, join_frame, peak_frame, 0.43), sequence )
sys.stdout.write('> '+cur_record.id+'_REM465 ')
reportSlicesTXT( getSlices(REM465_smooth, fold_rem465, join_frame, peak_frame, 0.50), sequence )
sys.stdout.write('> '+cur_record.id+'_HOTLOOPS ')
reportSlicesTXT( getSlices(HOTLOOPS_smooth, fold_hotloops, join_frame, peak_frame, 0.086), sequence )
sys.stdout.write('\n')
elif mode == 'scores':
sys.stdout.write('# RESIDUE COILS REM465 HOTLOOPS\n')
for i in range(len(REM465_smooth)):
sys.stdout.write(sequence[i]+'\t'+fpformat.fix(COILS_smooth[i],5)+'\t'+fpformat.fix(REM465_smooth[i],5)+'\t'+fpformat.fix(HOTLOOPS_smooth[i],5)+'\n')
else:
sys.stderr.write('Wrong mode given: '+mode+'\n')
raise SystemExit
db.close()
return
runDisEMBLpipeline()
|
whysgeff/DisEMBL-1.4
|
DisEMBL.py
|
Python
|
gpl-2.0
| 6,507
|
[
"Biopython"
] |
cd23f7c89fe17355b77005f3bff51153a0d4f7a417bf8c2c094107f0f258624b
|
### want to look at concatenating the R1 and R2 for 16s so that I can align and then cluster them....
import sys
file1=sys.argv[1]
file2=sys.argv[2]
file3=sys.argv[3]
def file_len_fasta(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return (i + 1)/2
import itertools
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
records_r = SeqIO.parse(open(file2,"rU"), "fasta")
records_f = SeqIO.parse(open(file1,"rU"), "fasta")
destination = open(file3,'w')
destination.close()
# normal biopython writer wraps every 60 characters, but mothur and qiime are not happy with that.
## since it can be such a big file I can try to slowly write it out, instead of keeping it all in memory.
length_file_1 = file_len_fasta(file1)
count = 0
import difflib
## first make sure the file only contains stuff from this go around.
destination = open(file3,'w')
destination.close()
destination = open(file3,'a')
writer = FastaWriter(destination, wrap=None)
writer.write_header()
for (forward, reverse) in itertools.izip(records_f, records_r):
count = count + 1
print count, "/", length_file_1
seq=difflib.SequenceMatcher(a=forward.seq.lower(), b=reverse.reverse_complement().lower())
print seq.ratio()
## this is because of the errors I saw in the illumina sequences where the r1 and r2 when reversed were very similar, although not identical.
if seq.ratio() < 0.40:
concatenated_R1_with_R2 = forward.seq + reverse.reverse_complement()
# print concatenated_R1_with_R2.seq
concatenated_R1_with_R2.id= forward.id
concatenated_R1_with_R2.description = ""
# print concatenated_R1_with_R2
writer.write_record(concatenated_R1_with_R2)
writer.write_footer()
destination.close()
|
jooolia/phylo_temporal_jericho
|
sequence_processing/concatenate_R1_and_R2_for_non_merging_primers.py
|
Python
|
mit
| 1,815
|
[
"Biopython"
] |
e46463a4249668049f1f2636787d6a6f1be42cedd47014a466e9f3b77753d76f
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
FACEBOOK_TEMPLATES = (
('question',(
#one-line
['{*actor*} <a href="{*url*}">asked a question about the article</a>: {*headline*}.'],
[{#short story
'template_title': '{*actor*} <a href="{*url*}">asked a question about the article</a>: {*headline*}.',
'template_body': '<b>"{*question*}"</b>'
}],
{#full story
'template_title': '{*actor*} <a href="{*url*}">asked a question about the article</a>: {*headline*}.',
'template_body': '''<div style="font-size:1.5em;margin-bottom:0.4em;">"{*question*}"</div>
<div style="font-weight:bold;margin-bottom:0.2em;">{*headline*}</div>
<div>{*article*} ...</div>'''
},
[{#action
'text': "Answer {*actor*}'s question",
'href': '{*url*}'
}],
)),
('answer',(
#one-line
['{*actor*} <a href="{*url*}">answered a question about the article</a>: {*headline*}.'],
[{#short story
'template_title': '{*actor*} <a href="{*url*}">answered a question about the article</a>: {*headline*}.',
'template_body': '''Q: "{*question*}" - <fb:name uid="{*asker*}" /><br/>
A: <b>"{*answer*}"</b> - {*actor*}'''
}],
{#full story
'template_title': '{*actor*} <a href="{*url*}">answered a question about the article</a>: {*headline*}.',
'template_body': '''<div style="margin-bottom:0.4em;">Q: "{*question*}" - <fb:name uid="{*asker*}" /></div>
<div style="font-size:1.5em;margin-bottom:0.2em;">A: "{*answer*}"</div>
<div style="font-size:1.5em;margin-bottom:0.4em;text-align:right;">- {*actor*}</div>
<div style="font-weight:bold;margin-bottom:0.2em;">{*headline*}</div>
<div>{*article*}</div>'''
},
[{#action
'text': "Read {*actor*}'s answer",
'href': '{*url*}'
}],
)),
('quip',(
#one-line
['{*actor*} <a href="{*url*}">quipped about the article</a>: {*headline*}.'],
[{#short story
'template_title': '{*actor*} <a href="{*url*}">quipped about the article</a>: {*headline*}.',
'template_body': '<b>{*actor*} {*verb*} {*quip*}</b>'
}],
{#full story
'template_title': '{*actor*} <a href="{*url*}">quipped about the article</a>: {*headline*}.',
'template_body': '''<div style="font-size:1.5em;margin-bottom:0.4em;margin-top:2px;"><span style="border:solid 2px lightblue;text-transform:uppercase;padding:0 2px;">{*actor*}</span> <span style="border:solid 2px blue;background-color:{*verb_color*};color:white;text-transform:uppercase;padding:0 2px;">{*verb*}</span> {*quip*}</div>
<div style="font-weight:bold;margin-bottom:0.2em;">{*headline*}</div>
<div>{*article*}</div>'''
},
[{#action
'text': "Quip back!",
'href': '{*url*}'
}],
)),
('letter',(
#one-line
['{*actor*} wrote a letter to the editor: <a href="{*url*}">{*title*}</a>.'],
[{#short story
'template_title': '{*actor*} wrote a letter to the editor: <a href="{*url*}">{*title*}</a>.',
'template_body': '<b>{*title*}</b><br/>{*body*}'
}],
{#full story
'template_title': '{*actor*} wrote a letter to the editor: <a href="{*url*}">{*title*}</a>.',
'template_body': '''<div style="font-size:1.5em;margin-bottom:0.2em;">{*title*}</div>
<div>{*body*}</div>'''
},
[{#action
'text': "Read {*actor*}'s letter",
'href': '{*url*}'
}],
)),
('letter_re_article',(
#one-line
['{*actor*} <a href="{*url*}">wrote a letter to the editor</a> in response to the article: {*headline*}'],
[{#short story
'template_title': '{*actor*} <a href="{*url*}">wrote a letter to the editor</a> in response to the article: {*headline*}',
'template_body': '<b>{*title*}</b><br/>{*body*}'
}],
{#full story
'template_title': '{*actor*} <a href="{*url*}">wrote a letter to the editor</a> in response to the article: {*headline*}',
'template_body': '''<div style="margin-top:0.8em;">{*actor*} wrote:</div>
<div style="font-size:1.5em;margin-bottom:0.2em;">{*title*}</div>
<div>{*body*}</div>
<div style="margin-bottom:0.2em;margin-top:1em;">In response to:</div>
<div style="font-weight:bold;margin-bottom:0.2em;">{*headline*}</div>
<div>{*article*}</div>'''
},
[{#action
'text': "Read {*actor*}'s letter",
'href': '{*url*}'
}],
)),
('letter_re_letter',(
#one-line
['{*actor*} responded to a letter to the editor: <a href="{*url*}">{*title*}</a>'],
[{#short story
'template_title': '{*actor*} responded to a letter to the editor: <a href="{*url*}">{*title*}</a>',
'template_body': '''In response to <fb:name uid="{*original_user*}" possessive="true"/> letter, {*actor*} wrote:<br/>
<b>{*title*}</b><br/>{*body*}'''
}],
{#full story
'template_title': '{*actor*} responded to a letter to the editor: <a href="{*url*}">{*title*}</a>',
'template_body': '''<div style="margin-top:0.8em;">{*actor*} wrote:</div>
<div style="font-size:1.5em;margin-bottom:0.2em;">{*title*}</div>
<div>{*body*}</div>
<div style="margin-bottom:0.2em;margin-top:1em;">In response to <fb:name uid="{*original_user*}" possessive="true"/> letter:</div>
<div style="font-weight:bold;margin-bottom:0.2em;">{*original_title*}</div>
<div>{*original_body*}</div>'''
},
[{#action
'text': "Read {*actor*}'s letter",
'href': '{*url*}'
}],
)),
('letter_re_letter_re_article',(
#one-line
['{*actor*} <a href="{*url*}">responded to a letter to the editor</a> about the article: {*headline*}'],
[{#short story
'template_title': '{*actor*} <a href="{*url*}">responded to a letter to the editor</a> about the article: {*headline*}',
'template_body': '''In response to <fb:name uid="{*original_user*}" possessive="true"/> letter, {*actor*} wrote:<br/>
<b>{*title*}</b><br/>{*body*}'''
}],
{#full story
'template_title': '{*actor*} <a href="{*url*}">responded to a letter to the editor</a> about the article: {*headline*}',
'template_body': '''<div style="margin-top:0.8em;">{*actor*} wrote:</div>
<div style="font-size:1.5em;margin-bottom:0.2em;">{*title*}</div>
<div>{*body*}</div>
<div style="margin-bottom:0.2em;margin-top:1em;">In response to <fb:name uid="{*original_user*}" possessive="true"/> letter about {*headline*}:</div>
<div style="font-weight:bold;margin-bottom:0.2em;">{*original_title*}</div>
<div>{*original_body*}</div>'''
},
[{#action
'text': "Read {*actor*}'s letter",
'href': '{*url*}'
}],
)),
)
|
brianboyer/newsmixer
|
social/facebook_templates.py
|
Python
|
gpl-3.0
| 8,098
|
[
"Brian"
] |
40c23b22f55f2864966302227c4178fb2002957808df350bfe2b71382cc89286
|
'''
Created on Apr 22, 2015
'''
import py2bash.base.node as base
__author__ = "Eugene Kondrashev"
__copyright__ = "Copyright 2015, Eugene Kondrashev"
__credits__ = ["Eugene Kondrashev"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Eugene Kondrashev"
__email__ = "[email protected]"
__status__ = "Prototype"
class Visitor(base.Visitor):
def visit_For(self, e):
pattern = '''for ((%(i)s=%(start)s;%(i)s<=%(end)s;%(step)s))
do
%(body)s
done'''
target = self.visit(e.target)
        if e.iter.func.id not in ('range', 'xrange'):
            raise NotImplementedError(e.iter.func.id)
iter_ = self.visit(e.iter)
if len(iter_) == 1:
start, end, step = 0, iter_[0], 1
elif len(iter_) == 2:
start, end, step = iter_[0], iter_[1], 1
elif len(iter_) == 3:
start, end, step = iter_
else:
raise NotImplementedError(iter_)
body = map(self.visit, e.body)
if step == 1:
step = '%s++' % target
else:
step = '%s+=%s' % (target, step)
return pattern % {
'i': target,
'start': start,
'end': end,
'step': step,
'body':'\n '.join(body)
}
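# Hedged usage sketch (added for illustration; the dispatch API of py2bash.base.node.Visitor
# is assumed, not shown here). Conceptually, a Python loop such as
#
#     for i in range(1, 5):
#         do_work(i)
#
# is rendered by visit_For above into bash of roughly this shape:
#
#     for ((i=1;i<=5;i++))
#     do
#         <translated body>
#     done
#
# Note that the generated loop uses "<=", so the bash loop includes the end value,
# whereas Python's range() excludes it.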
|
ekondrashev/py2bash
|
py2bash/optimized/memory/node.py
|
Python
|
mit
| 1,382
|
[
"VisIt"
] |
3d5c1fc71b7d32dd233876d08f122f72578f732da39f1031f8c0107adafa03c2
|
#!/usr/bin/env python
'''
Monitor the aosn web site for new realtime data from Tethys and use
DAPloaders.py to load new data into the stoqs_realtime database.
Mike McCann
MBARI 17 May 2011
'''
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../"))
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE']='config.settings.local'
import DAPloaders
import logging
from datetime import datetime, timedelta
import urllib.request, urllib.error, urllib.parse
import time
import csv
import re
from stoqs import models as mod
import socket
# Set up global variables for logging output to STDOUT
logger = logging.getLogger('monitorTethysLogger')
fh = logging.StreamHandler()
f = logging.Formatter("%(levelname)s %(asctime)sZ %(filename)s %(funcName)s():%(lineno)d %(message)s")
fh.setFormatter(f)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
class NoMoreTethysLogSets(Exception):
pass
def getLogSetStartAndEnd(url):
    '''Step through the lines of the html to pick out the start and end epoch seconds of this LogSet in the url
return start and end times as datetime objects.
'''
folderStart = None
folderEnd = None
    for line in urllib.request.urlopen(url).read().decode('utf-8').split('\n'):
##logger.debug("line = %s" % line)
        d = re.match('.+var startTime=([\.\d]+)', line) # Get time of first data for this mission
if d:
folderStart = datetime.utcfromtimestamp(float(d.group(1)) / 1000.)
logger.info("Datetime of first data in %s is %s", url, folderStart)
d = re.match('.+var endTime=([\.\d]+)', line) # Get time of last data for this mission
if d:
folderEnd = datetime.utcfromtimestamp(float(d.group(1)) / 1000.)
logger.info("Datetime of last data in %s is %s", url, folderEnd)
if folderStart and folderEnd:
return (folderStart, folderEnd)
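# Illustrative note (added): the regexes above expect JavaScript-style lines in the
# TethysLogs index.html, e.g.
#     var startTime=1305662400000;
#     var endTime=1305668000000;
# (hypothetical values) holding epoch times in milliseconds, which are divided by 1000.
# before conversion with datetime.utcfromtimestamp().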
def getNewTethysDatetime(startFolder = None):
'''
Scrape AOSN TethysLog web site for the end time and mission directory name of the most recent data.
Returns datetime object and string of the directory name as a tuple. If startFolder is provided then
start scanning following this folder. This is so that the calling routine can skip over missions
that have no measurements.
'''
previousFolderName = None
previousFolderEndTime = None
# Get directory list from aosn site
url = 'http://aosn.mbari.org/TethysLogs/'
logger.info("Scanning log sets and index.html start and end times in %s", url)
if startFolder:
logger.info("Starting at startFolder = %s", startFolder)
    for line in urllib.request.urlopen(url).read().decode('utf-8').split('\n'):
# href="20110526T151722/"
logger.debug("line = %s", line)
f = re.match('.+href="(\d+T\d+)', line)
if f:
logger.debug("f.group(1) = %s", f.group(1))
if startFolder:
if f.group(1) < startFolder:
# Skip over folders that are before startFolder. We need to revisit startFolder to set previous.. values
previousFolderName = f.group(1)
logger.debug("Skipping folder %s and it is earlier than startFolder = %s", previousFolderName, startFolder)
continue
folderDatetime = datetime(*time.strptime(f.group(1), '%Y%m%dT%H%M%S')[:6])
logger.debug("Going on to test whether folder %s has good data beyond lastTethysDatetime = %s", f.group(1), lastTethysDatetime)
if folderDatetime > lastTethysDatetime:
logger.info("Folder %s is newer than than last Tethys data in %s", f.group(1), stoqsDB)
(folderStart, folderEnd) = getLogSetStartAndEnd(url + f.group(1))
if folderStart and folderEnd:
return (f.group(1), folderStart, folderEnd, previousFolderName, previousFolderEndTime)
else:
previousFolderName = f.group(1)
logger.debug("Folder %s contains data that have already been loaded in %s", f.group(1), stoqsDB)
                for line in urllib.request.urlopen(url + f.group(1)).read().decode('utf-8').split('\n'):
##logger.debug("line = %s", line)
d = re.match('.+var endTime=([\.\d]+)', line) # Get time of last data for this mission
if d:
previousFolderEndTime = datetime.utcfromtimestamp(float(d.group(1)) / 1000.)
logger.info("Fell out of loop looking for new Folder with a startDate > lastTethysDatetime,"
" checking for new data in previousFolderName = %s", previousFolderName)
if previousFolderName is not None:
(folderStart, folderEnd) = getLogSetStartAndEnd(url + previousFolderName)
if (folderEnd - lastTethysDatetime) > timedelta(seconds = 1):
logger.info("%s has new data that extends %s beyond what is in %s", previousFolderName,
(folderEnd - lastTethysDatetime), stoqsDB)
            # Return None for previous values as there's no way we'll assign an end time in this situation
return (previousFolderName, folderStart, folderEnd, None, None)
raise NoMoreTethysLogSets
if __name__ == '__main__':
# No arguments to parse. Just look for new data and DAP load it.
# Get time of last data item loaded
stoqsDB = 'stoqsdb_realtime'
hostname = socket.gethostbyaddr(socket.gethostname())[0]
url = 'http://' + hostname + '/' + stoqsDB + '/position/tethys/last/1/data.csv'
activityBaseName = 'Tethys realtime - '
# When set to None getNewTethysDatetime() starts scanning from the beginning of the aosn index
# If starting a new database then set to a value, e.g. '20110609T033428' to begin at that directory
startFolderName = None
while True:
# Loop until we run out of new Tethys data from aosn
try:
            lastTethysEs = float(next(csv.DictReader(urllib.request.urlopen(url).read().decode('utf-8').splitlines()))['epochSeconds'])
except StopIteration:
lastTethysEs = 0.0
lastTethysDatetime = datetime.utcfromtimestamp(lastTethysEs)
logger.info("-----------------------------------------------------------------------------------------------------------------")
logger.info("Checking %s", url)
logger.info("Last Tethys data in %s is from %s", stoqsDB, lastTethysDatetime)
try:
logger.debug("Calling getNewTethysDatetime with startFolderName = %s", startFolderName)
(folderName, folderStart, folderEnd, previousFolderName, previousFolderEndTime) = getNewTethysDatetime(startFolderName);
logger.debug("getNewTethysDatetime() returned previousFolderEndTime = %s", previousFolderEndTime)
except NoMoreTethysLogSets:
logger.info("No new Tethys data. Exiting.")
sys.exit(1)
        input("Pause")  # debugging pause; press Enter to continue
logger.info("Received new Tethys data ending at %s in folder %s", folderEnd, folderName)
newTethysURL = 'http://beach.mbari.org:8080/thredds/dodsC/lrauv/tethys/%s/shore.nc' % folderName
# The first time through the loop we need to get the last folder from the 'previous...' items returned by getNewTethysDatetime()
# After that we'll remember the last folder from the last successful load done in this loop
if not startFolderName and previousFolderName:
lastAName = activityBaseName + previousFolderName
lastAEndTime = previousFolderEndTime
else:
lastAName = None
# If we have any activities with a null enddate whose name matches the last activity name then set the end date
if lastAName:
nullEndtimeActList = mod.Activity.objects.filter(enddate__isnull = True)
if lastAName in [a.name for a in nullEndtimeActList]:
logger.info("Found Activity name = %s with null enddate", lastAName)
# We have a new folderName, set the end time for the previous Activity and "close" that log set
mod.Activity.objects.filter(name = lastAName).update(enddate = lastAEndTime)
logger.info("Set endDatetime = %s for previous Activity.id = %s", a.enddate, a.id)
# Tethys loads from TDS on beach - create with Null end time, we don't know the end until we have the next folder
aName = activityBaseName + folderName
##newTethysDatetime = newTethysDatetime - timedelta(hours = 4)
try:
lrauvLoad = DAPloaders.Lrauv_Loader(activityName = aName,
url = newTethysURL,
startDatetime = folderStart,
endDatetime = None,
dataStartDatetime = lastTethysDatetime,
platformName = 'tethys',
stride = 1)
except DAPloaders.NoValidData:
# Make sure we don't visit this startFolder again - add 1 second to it
startFolderName = (datetime(*time.strptime(folderName, '%Y%m%dT%H%M%S')[:6]) + timedelta(seconds = 1)).strftime('%Y%m%dT%H%M%S')
logger.info("No measurements in this log set. Activity was not created as there was nothing to load.")
if not previousFolderName:
logger.info("previousFolderName = None, indicating that we are looking for valid data in the last folder")
logger.info("Exiting now to prevent time consuming loop waiting for valid data in previousFolderName = %s", previousFolderName)
sys.exit(1)
else:
logger.info("Loading data from %s into %s", newTethysURL, stoqsDB)
nMeasurements = lrauvLoad.process_data()
newComment = "%s loaded on %sZ" % (' '.join(lrauvLoad.varsLoaded), datetime.utcnow())
logger.info("Updating comment with newComment = %s", newComment)
mod.Activity.objects.filter(name = aName).update(comment = newComment)
if not previousFolderName:
logger.info("previousFolderName = None, indicating that we are looking for valid data in the last folder")
logger.info("Exiting now to prevent time consuming loop waiting for valid data in previousFolderName = %s", previousFolderName)
sys.exit(1)
elif nMeasurements:
# Make sure we don't visit this startFolder again - add 1 second to it
startFolderName = (datetime(*time.strptime(folderName, '%Y%m%dT%H%M%S')[:6]) + timedelta(seconds = 1)).strftime('%Y%m%dT%H%M%S')
else:
startFolderName = folderName
lastAName = aName
lastAEndTime = folderEnd
|
stoqs/stoqs
|
stoqs/loaders/CANON/realtime/monitorTethys.py
|
Python
|
gpl-3.0
| 10,723
|
[
"VisIt"
] |
6b4692c74c8fbe13d027228deae4ce795be1a52c7903c723a11bda2dc826852d
|
"""
This module defines the building blocks of a CP2K input file. The cp2k input structure is essentially a collection
of "sections" which are similar to dictionary objects that activate modules of the cp2k executable, and then
"keywords" which adjust variables inside of those modules. For example, FORCE_EVAL section will activate CP2K's ability
to calculate forces, and inside FORCE_EVAL, the Keyword "METHOD" can be set to "QS" to set the method of force evaluation
to be the quickstep (DFT) module.
A quick overview of the module:
-- Section class defines the basis of Cp2k input and contains methods for manipulating these objects similarly to Dicts.
-- Keyword class defines the keywords used inside of Section objects that changes variables in Cp2k program
-- Cp2kInput class is special instantiation of Section that is used to represent the full cp2k calculation input.
-- The rest of the classes are children of Section intended to make initialization of common sections easier.
"""
import copy
import os
import re
import textwrap
import warnings
from typing import Dict, List, Literal, Sequence, Tuple, Union
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cp2k.utils import _postprocessor, _preprocessor
__author__ = "Nicholas Winner"
__version__ = "0.3"
__email__ = "[email protected]"
__date__ = "August 2020"
class Keyword(MSONable):
"""
Class representing a keyword argument in CP2K. Within CP2K Sections, which activate features
of the CP2K code, the keywords are arguments that control the functionality of that feature.
For example, the section "FORCE_EVAL" activates the evaluation of forces/energies, but within
"FORCE_EVAL" the keyword "METHOD" controls whether or not this will be done with, say,
"Quickstep" (DFT) or "EIP" (empirical interatomic potential).
"""
def __init__(
self,
name: str,
*values,
description: str = None,
units: str = None,
verbose: bool = True,
repeats: bool = False,
):
"""
Initializes a keyword. These Keywords and the value passed to them are sometimes as simple as KEYWORD VALUE,
but can also be more elaborate such as KEYWORD [UNITS] VALUE1 VALUE2, which is why this class exists:
to handle many values and control easy printing to an input file.
Args:
name (str): The name of this keyword. Must match an acceptable keyword from CP2K
args: All non-keyword arguments after 'name' are interpreted as the values to set for
this keyword. i.e: KEYWORD ARG1 ARG2 would provide two values to the keyword.
description (str): The description for this keyword. This can make readability of
input files easier for some. Default=None.
units (str): The units for this keyword. If not specified, CP2K default units will be
used. Consult manual for default units. Default=None.
repeats (bool): Whether or not this keyword may be repeated. Default=False.
"""
self.name = name
self.values = values
self.description = description
self.repeats = repeats
self.units = units
self.verbose = verbose
def __str__(self):
return (
self.name.__str__()
+ " "
+ (f"[{self.units}] " if self.units else "")
+ " ".join(map(str, self.values))
+ (" ! " + self.description if (self.description and self.verbose) else "")
)
def __eq__(self, other):
if self.name.upper() == other.name.upper():
v1 = [_.upper() if isinstance(_, str) else _ for _ in self.values]
v2 = [_.upper() if isinstance(_, str) else _ for _ in other.values]
if v1 == v2:
                if self.units == other.units:
return True
return False
def __add__(self, other):
return KeywordList(keywords=[self, other])
def __getitem__(self, item):
return self.values[item]
def as_dict(self):
"""
Get a dictionary representation of the Keyword
"""
d = {}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["name"] = self.name
d["values"] = self.values
d["description"] = self.description
d["repeats"] = self.repeats
d["units"] = self.units
d["verbose"] = self.verbose
return d
def get_string(self):
"""
String representation of Keyword
"""
return self.__str__()
@classmethod
def from_dict(cls, d):
"""
Initialise from dictionary
"""
return Keyword(
d["name"],
*d["values"],
description=d["description"],
repeats=d["repeats"],
units=d["units"],
verbose=d["verbose"],
)
@staticmethod
def from_string(s):
"""
Initialise from a string
"""
s = s.strip()
        if "!" in s or "#" in s:
            s, description = re.split("(?:!|#)", s, maxsplit=1)
            description = description.strip()
else:
description = None
units = re.findall(r"\[(.*)\]", s) or [None]
s = re.sub(r"\[(.*)\]", "", s)
return Keyword(*map(_postprocessor, s.split()), units=units[0], description=description)
def verbosity(self, v):
"""
Change the printing of this keyword's description.
"""
self.verbose = v
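# Hedged usage sketch (added for illustration, not part of the original module): a Keyword
# renders as "NAME [UNITS] VALUES ! description", e.g.
#
#     kw = Keyword("CUTOFF", 500, units="Ry", description="planewave cutoff")
#     str(kw)   # -> "CUTOFF [Ry] 500 ! planewave cutoff"
#
# and Keyword.from_string() performs roughly the inverse parse of such a line.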
class KeywordList(MSONable):
"""
Some keywords can be repeated, which makes accessing them via the normal dictionary
methods a little unnatural. This class deals with this by defining a collection
of same-named keywords that are accessed by one name.
"""
def __init__(self, keywords: Sequence[Keyword]):
"""
        Initializes a KeywordList: a collection of Keywords that all share the same name, used for
        keywords that CP2K allows to be repeated within a single section.
Args:
keywords: A list of keywords. Must all have the same name (case-insensitive)
"""
assert all(k.name.upper() == keywords[0].name.upper() for k in keywords) if keywords else True
self.name = keywords[0].name if keywords else None
self.keywords = keywords
def __str__(self):
return self.get_string()
def __eq__(self, other):
return all(k == o for k, o in zip(self.keywords, other.keywords))
def __add__(self, other):
return self.extend(other)
def __len__(self):
return len(self.keywords)
def __getitem__(self, item):
return self.keywords[item]
def append(self, item):
"""
append the keyword list
"""
self.keywords.append(item)
def extend(self, l):
"""
extend the keyword list
"""
self.keywords.extend(l)
def get_string(self, indent=0):
"""
String representation of Keyword
"""
return " \n".join(["\t" * indent + k.__str__() for k in self.keywords])
def verbosity(self, verbosity):
"""
Silence all keywords in keyword list
"""
for k in self.keywords:
k.verbosity(verbosity)
class Section(MSONable):
"""
    Basic representation of a section of CP2K input. Activates functionality inside of the Cp2k executable.
"""
required_sections: tuple = ()
required_keywords: tuple = ()
def __init__(
self,
name: str,
subsections: dict = None,
repeats: bool = False,
description: Union[str, None] = None,
        keywords: Dict = None,
        section_parameters: Union[List, Tuple] = None,
location: str = None,
verbose: bool = True,
alias: str = None,
**kwargs,
):
"""
Basic object representing a CP2K Section. Sections activate different parts of the calculation.
For example, FORCE_EVAL section will activate CP2K's ability to calculate forces. Sections are
described with the following:
Args:
name (str): The name of the section (must match name in CP2K)
subsections (dict): A dictionary of subsections that are nested in this section. Format is
{'NAME': Section(*args, **kwargs). The name you chose for 'NAME' to index that subsection
does not *have* to be the same as the section's true name, but we recommend matching
them. You can specify a blank dictionary if there are no subsections, or if you want to
insert the subsections later.
repeats (bool): Whether or not this section can be repeated. Most sections cannot. Default=False.
description (str): Description of this section for easier readability/more descriptiveness
keywords (list): the keywords to be set for this section. Each element should be a Keyword
object. This can be more cumbersome than simply using kwargs for building a class in a script,
but is more convenient for the class instantiations of CP2K sections (see below).
section_parameters (list): the section parameters for this section. Section parameters are
specialized keywords that modify the behavior of the section overall. Most
sections do not have section parameters, but some do. Unlike normal Keywords, these are
specified as strings and not as Keyword objects.
location (str): the path to the section in the form 'SECTION/SUBSECTION1/SUBSECTION3', example for
QS module: 'FORCE_EVAL/DFT/QS'. This location is used to automatically determine if a subsection
requires a supersection to be activated.
verbose (str): Controls how much is printed to Cp2k input files (Also see Keyword). If True, then
a description of the section will be printed with it as a comment (if description is set).
Default=True.
kwargs are interpreted as keyword, value pairs and added to the keywords array as Keyword objects
"""
self.name = name
self.subsections = subsections if subsections is not None else {}
self.repeats = repeats
self.description = description
        self.keywords = keywords if keywords is not None else {}
        self.section_parameters = section_parameters if section_parameters is not None else []
self.location = location
self.verbose = verbose
self.alias = alias
self.kwargs = kwargs
for k, v in self.kwargs.items():
self.keywords[k] = Keyword(k, v)
for k in self.required_sections:
if not self.check(k):
raise UserWarning(f"WARNING: REQUIRED SECTION {k} HAS NOT BEEN INITIALIZED")
for k in self.required_keywords:
if k not in self.keywords:
raise UserWarning(f"WARNING: REQUIRED KEYWORD {k} HAS NOT BEEN PROVIDED")
def __str__(self):
return self.get_string()
def __eq__(self, d):
d2 = copy.deepcopy(d)
s2 = copy.deepcopy(self)
d2.silence()
s2.silence()
return d2.as_dict() == s2.as_dict()
def __deepcopy__(self, memodict={}):
c = copy.deepcopy(self.as_dict())
return getattr(__import__(c["@module"], globals(), locals(), c["@class"], 0), c["@class"]).from_dict(
copy.deepcopy(self.as_dict())
)
def __getitem__(self, d):
for k in self.keywords:
if str(k).upper() == str(d).upper():
return self.keywords[k]
for k in self.subsections:
if str(k).upper() == str(d).upper():
return self.subsections[k]
raise KeyError
def __add__(self, other):
if isinstance(other, (Keyword, KeywordList)):
if other.name in self.keywords:
self.keywords[other.name] += other
else:
self.keywords[other.name] = other
elif isinstance(other, Section):
self.insert(other)
else:
TypeError("Can only add sections or keywords.")
def add(self, other):
"""
Add another keyword to the current section
"""
assert isinstance(other, (Keyword, KeywordList))
self.__add__(other)
def get(self, d, default=None):
"""
Similar to get for dictionaries. This will attempt to retrieve the
section or keyword matching d. Will not raise an error if d does not
exist.
Args:
d: the key to retrieve, if present
default: what to return if d is not found
"""
for k in self.keywords:
if str(k).upper() == str(d).upper():
return self.keywords[k]
for k in self.subsections:
if str(k).upper() == str(d).upper():
return self.subsections[k]
return default
def __setitem__(self, key, value):
if isinstance(value, Section):
if key in self.subsections:
self.subsections[key] = value.__deepcopy__()
else:
self.insert(value)
else:
if not isinstance(value, (Keyword, KeywordList)):
value = Keyword(key, value)
match = [k for k in self.keywords if key.upper() == k.upper()]
if match:
del self.keywords[match[0]]
self.keywords[key] = value
def __delitem__(self, key):
"""
Delete section with name matching key OR delete all keywords
with names matching this key
"""
l = [s for s in self.subsections if s.upper() == key.upper()]
if l:
del self.subsections[l[0]]
return
l = [k for k in self.keywords if k.upper() == key.upper()]
if l:
del self.keywords[l[0]]
return
raise KeyError("No section or keyword matching the given key.")
def __sub__(self, other):
return self.__delitem__(other)
def update(self, d: dict):
"""
Update the Section according to a dictionary argument. This is most useful
for providing user-override settings to default parameters. As you pass a
dictionary the class variables like "description", "location", or "repeats"
are not included. Therefore, it is recommended that this be used to modify
existing Section objects to a user's needs, but not used for the creation
of new Section child-classes.
Args:
d (dict): A dictionary containing the update information. Should use nested dictionaries to
specify the full path of the update. If a section or keyword does not exist, it will be created,
but only with the values that are provided in "d", not using default values from a Section object.
                e.g. {'SUBSECTION1': {'SUBSEC2': {'NEW_KEYWORD': 'NEW_VAL'}, 'NEW_SUBSEC': {'NEW_KWD': 'NEW_VAL'}}}
"""
Section._update(self, d)
@staticmethod
def _update(d1, d2):
"""
Helper method for self.update(d) method (see above).
"""
for k, v in d2.items():
if isinstance(v, (str, float, int, bool)):
d1[k] = Keyword(k, v)
elif isinstance(v, (Keyword, KeywordList)):
d1[k] = v
elif isinstance(v, dict):
tmp = [_ for _ in d1.subsections if k.upper() == _.upper()]
if not tmp:
d1.insert(Section(k, subsections={}))
Section._update(d1.subsections[k], v)
else:
Section._update(d1.subsections[tmp[0]], v)
elif isinstance(v, Section):
d1.insert(v)
else:
raise TypeError(f"Unrecognized type: {type(v)}")
def set(self, d: dict):
"""
Alias for update. Used by custodian.
"""
self.update(d)
def unset(self, d: dict):
"""
Dict based deletion. Used by custodian.
"""
for k, v in d.items():
if isinstance(v, (str, float, int, bool)):
del self[k][v]
elif isinstance(v, (Keyword, Section, KeywordList)):
del self[k][v.name]
elif isinstance(v, dict):
self[k].unset(v)
else:
TypeError("Can only add sections or keywords.")
def inc(self, d: dict):
"""
Mongo style dict modification. Include.
"""
for k, v in d.items():
if isinstance(v, (str, float, bool, int)):
v = Keyword(k, v)
if isinstance(v, (Keyword, Section, KeywordList)):
self.add(v)
elif isinstance(v, dict):
self[k].inc(v)
else:
TypeError("Can only add sections or keywords.")
def insert(self, d):
"""
Insert a new section as a subsection of the current one
"""
assert isinstance(d, Section)
self.subsections[d.alias or d.name] = d.__deepcopy__()
def check(self, path: str):
"""
        Check if a section exists within the current section using a path. Can be useful for cross-checking whether or not
required dependencies have been satisfied, which CP2K does not enforce.
Args:
path (str): Path to section of form 'SUBSECTION1/SUBSECTION2/SUBSECTION_OF_INTEREST'
"""
_path = path.split("/")
s = self.subsections
for p in _path:
tmp = [_ for _ in s if p.upper() == _.upper()]
if tmp:
s = s[tmp[0]].subsections
else:
return False
return True
def by_path(self, path: str):
"""
Access a sub-section using a path. Used by the file parser.
Args:
path (str): Path to section of form 'SUBSECTION1/SUBSECTION2/SUBSECTION_OF_INTEREST'
"""
_path = path.split("/")
if _path[0] == self.name:
_path = _path[1:]
s = self
for p in _path:
s = s.subsections[p] # only search subsections in case of repeat name
return s
def get_string(self):
"""
Get string representation of Section
"""
return Section._get_string(self)
@staticmethod
def _get_string(d, indent=0):
"""
Helper function to return a pretty string of the section. Includes indentation and
descriptions (if present).
"""
string = ""
if d.description and d.verbose:
string += (
"\n"
+ textwrap.fill(
d.description,
initial_indent="\t" * indent + "! ",
subsequent_indent="\t" * indent + "! ",
width=50,
)
+ "\n"
)
string += "\t" * indent + "&" + d.name
string += " " + " ".join(map(str, d.section_parameters)) + "\n"
for k, v in d.keywords.items():
if isinstance(v, KeywordList):
string += v.get_string(indent=indent + 1) + "\n"
else:
string += "\t" * (indent + 1) + v.get_string() + "\n"
for k, v in d.subsections.items():
string += v._get_string(v, indent + 1)
string += "\t" * indent + "&END " + d.name + "\n"
return string
def verbosity(self, verbosity):
"""
        Change the section verbosity recursively by turning on/off the printing of descriptions.
Turning off descriptions may reduce the appealing documentation of input files, but also
helps de-clutter them.
"""
self.verbose = verbosity
for k, v in self.keywords.items():
v.verbosity(verbosity)
for k, v in self.subsections.items():
v.verbosity(verbosity)
def silence(self):
"""
Recursively delete all print sections so that only defaults are printed out.
"""
if self.subsections:
if self.subsections.get("PRINT"):
del self.subsections["PRINT"]
for _s in self.subsections:
self.subsections[_s].silence()
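# Hedged usage sketch (added for illustration): a Section behaves like a nested dict, so
# user overrides are normally applied with update(), e.g.
#
#     sec = Section("FORCE_EVAL", subsections={"DFT": Section("DFT", subsections={})})
#     sec.update({"DFT": {"SCF": {"MAX_SCF": 200}}})
#     sec["DFT"]["SCF"]["MAX_SCF"]   # -> Keyword("MAX_SCF", 200)
#
# Missing subsections ("SCF" here) are created on the fly with only the keywords provided.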
class Cp2kInput(Section):
"""
Special instance of 'Section' class that is meant to represent the overall cp2k input.
Distinguishes itself from Section by overriding get_string() to not print this section's
title and by implementing the file i/o.
"""
def __init__(self, name: str = "CP2K_INPUT", subsections: dict = None, **kwargs):
"""
Initialize Cp2kInput by calling the super
"""
self.name = name
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = "CP2K Input"
super().__init__(
name,
repeats=False,
description=description,
section_parameters=[],
subsections=subsections,
**kwargs,
)
def get_string(self):
"""
Get string representation of the Cp2kInput
"""
s = ""
for k in self.subsections.keys():
s += self.subsections[k].get_string()
return s
@classmethod
def _from_dict(cls, d):
"""
Initialize from a dictionary
"""
return Cp2kInput(
"CP2K_INPUT",
subsections=getattr(
__import__(d["@module"], globals(), locals(), d["@class"], 0),
d["@class"],
)
.from_dict(d)
.subsections,
)
@staticmethod
def from_file(file: str):
"""
Initialize from a file
"""
with zopen(file, "rt") as f:
txt = _preprocessor(f.read(), os.path.dirname(f.name))
return Cp2kInput.from_string(txt)
@staticmethod
def from_string(s: str):
"""
Initialize from a string
"""
lines = s.splitlines()
lines = [line.replace("\t", "") for line in lines]
lines = [line.strip() for line in lines]
lines = [line for line in lines if line]
return Cp2kInput.from_lines(lines)
@classmethod
def from_lines(cls, lines: Union[List, tuple]):
"""
Helper method to read lines of file
"""
cp2k_input = Cp2kInput("CP2K_INPUT", subsections={})
Cp2kInput._from_lines(cp2k_input, lines)
return cp2k_input
def _from_lines(self, lines):
"""
Helper method, reads lines of text to get a Cp2kInput
"""
current = self.name
description = ""
for line in lines:
if line.startswith("!") or line.startswith("#"):
description += line[1:].strip()
elif line.upper().startswith("&END"):
current = "/".join(current.split("/")[:-1])
elif line.startswith("&"):
name, subsection_params = line.split()[0][1:], line.split()[1:]
alias = name + " " + " ".join(subsection_params) if subsection_params else None
s = Section(
name,
section_parameters=subsection_params,
alias=alias,
subsections={},
description=description,
)
description = ""
self.by_path(current).insert(s)
current = current + "/" + alias if alias else current + "/" + name
else:
kwd = Keyword.from_string(line)
tmp = self.by_path(current).get(kwd.name)
if tmp:
if isinstance(tmp, KeywordList):
self.by_path(current)[kwd.name].append(kwd)
else:
self.by_path(current)[kwd.name] = KeywordList(keywords=[kwd, tmp])
else:
self.by_path(current).keywords[kwd.name] = kwd
def write_file(
self,
input_filename: str = "cp2k.inp",
output_dir: str = ".",
make_dir_if_not_present: bool = True,
):
"""Write input to a file.
Args:
input_filename (str, optional): Defaults to "cp2k.inp".
output_dir (str, optional): Defaults to ".".
make_dir_if_not_present (bool, optional): Defaults to True.
"""
if not os.path.isdir(output_dir) and make_dir_if_not_present:
os.mkdir(output_dir)
filepath = os.path.join(output_dir, input_filename)
with open(filepath, "w") as f:
f.write(self.get_string())
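# Hedged usage sketch (added; Global, ForceEval and Dft are the convenience classes defined
# below in this module):
#
#     ci = Cp2kInput(subsections={
#         "GLOBAL": Global(project_name="test", run_type="ENERGY_FORCE"),
#         "FORCE_EVAL": ForceEval(subsections={"DFT": Dft(subsections={})}),
#     })
#     ci.write_file("cp2k.inp")              # writes ./cp2k.inp
#     ci2 = Cp2kInput.from_file("cp2k.inp")  # parses it back into a Cp2kInput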
class Global(Section):
"""
Controls 'global' settings for cp2k execution such as RUN_TYPE and PROJECT_NAME
"""
def __init__(self, project_name: str = "CP2K", run_type: str = "ENERGY_FORCE", **kwargs):
"""Initialize the global section
Args:
project_name (str, optional): Defaults to "CP2K".
run_type (str, optional): See https://manual.cp2k.org/trunk/CP2K_INPUT/GLOBAL.html#list_RUN_TYPE
for possible values. Defaults to "ENERGY_FORCE".
"""
self.project_name = project_name
self.run_type = run_type
self.kwargs = kwargs
description = (
"Section with general information regarding which kind of simulation"
+ "to perform an parameters for the whole PROGRAM"
)
keywords = {
"PROJECT_NAME": Keyword("PROJECT_NAME", project_name),
"RUN_TYPE": Keyword("RUN_TYPE", run_type),
"EXTENDED_FFT_LENGTHS": Keyword("EXTENDED_FFT_LENGTHS", True),
}
super().__init__(
"GLOBAL",
description=description,
keywords=keywords,
subsections={},
**kwargs,
)
class ForceEval(Section):
"""
Controls the calculation of energy and forces in Cp2k
"""
def __init__(self, subsections: dict = None, **kwargs):
"""Initialize the ForceEval section
Args:
subsections (dict, optional): Defaults to None.
"""
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = "parameters needed to calculate energy and forces and describe the system you want to analyze."
keywords = {
"METHOD": Keyword("METHOD", kwargs.get("METHOD", "QS")),
"STRESS_TENSOR": Keyword("STRESS_TENSOR", kwargs.get("STRESS_TENSOR", "ANALYTICAL")),
}
super().__init__(
"FORCE_EVAL",
repeats=True,
description=description,
keywords=keywords,
subsections=subsections,
**kwargs,
)
class Dft(Section):
"""
Controls the DFT parameters in Cp2k
"""
def __init__(
self,
        basis_set_filenames=("BASIS_MOLOPT",),
potential_filename="GTH_POTENTIALS",
uks: bool = True,
wfn_restart_file_name: str = None,
subsections: dict = None,
**kwargs,
):
"""Initialize the DFT section.
Args:
            basis_set_filenames (Sequence, optional): Names of the files that contain the basis set
                information. Defaults to ("BASIS_MOLOPT",).
potential_filename (str, optional): Name of the file that contains the pseudopotential
information. Defaults to "GTH_POTENTIALS".
uks (bool, optional): Whether to run unrestricted Kohn Sham (spin polarized).
Defaults to True.
wfn_restart_file_name (str, optional): Defaults to None.
subsections (dict, optional): Any subsections to initialize with. Defaults to None.
"""
self.basis_set_filenames = basis_set_filenames
self.potential_filename = potential_filename
self.uks = uks
self.wfn_restart_file_name = wfn_restart_file_name
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = "parameter needed by dft programs"
keywords = {
"BASIS_SET_FILE_NAME": KeywordList([Keyword("BASIS_SET_FILE_NAME", k) for k in basis_set_filenames]),
"POTENTIAL_FILE_NAME": Keyword("POTENTIAL_FILE_NAME", potential_filename),
"UKS": Keyword("UKS", uks),
}
if wfn_restart_file_name:
keywords["WFN_RESTART_FILE_NAME"] = Keyword("WFN_RESTART_FILE_NAME", wfn_restart_file_name)
super().__init__(
"DFT",
description=description,
keywords=keywords,
subsections=self.subsections,
**kwargs,
)
class Subsys(Section):
"""
Controls the definition of the system to be simulated
"""
def __init__(self, subsections: dict = None, **kwargs):
"""
Initialize the subsys section
"""
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = "a subsystem: coordinates, topology, molecules and cell"
super().__init__("SUBSYS", description=description, subsections=subsections, **kwargs)
class QS(Section):
"""
Controls the quickstep settings (DFT driver)
"""
def __init__(
self,
method: Literal["GPW", "GAPW"] = "GPW",
eps_default: float = 1e-7,
extrapolation: Literal["PS", "ASPC"] = "PS",
subsections: dict = None,
**kwargs,
):
"""
Initialize the QS Section
Args:
method ("GPW" | "GAPW"): What DFT methodology to use. GPW (Gaussian Plane Waves) for DFT with
pseudopotentials or GAPW (Gaussian Augmented Plane Waves) for all electron calculations.
            eps_default (float): The default level of convergence accuracy. NOTE: This is a global value that
                sets the numerical accuracy of all EPS_* values in the QS module. It is not the same as EPS_SCF,
                which sets convergence accuracy of the SCF cycle alone.
            extrapolation ("PS" | "ASPC"): Method used for extrapolation. For a gamma-point-only calculation, one
                should use PS for relaxations and ASPC for MD. See the manual for other options.
subsections (dict): Subsections to initialize with.
"""
self.method = method
self.eps_default = eps_default
self.extrapolation = extrapolation
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = "parameters needed to set up the Quickstep framework"
keywords = {
"METHOD": Keyword("METHOD", method),
"EPS_DEFAULT": Keyword("EPS_DEFAULT", eps_default),
"EXTRAPOLATION": Keyword("EXTRAPOLATION", extrapolation),
}
super().__init__(
"QS",
description=description,
keywords=keywords,
subsections=subsections,
**kwargs,
)
class Scf(Section):
"""
Controls the self consistent field loop
"""
def __init__(
self,
max_scf: int = 50,
eps_scf: float = 1e-6,
scf_guess: Literal[
"ATOMIC", "CORE", "HISTORY_RESTART", "MOPAC", "NONE", "RANDOM", "RESTART", "SPARSE"
] = "RESTART",
subsections: dict = None,
**kwargs,
):
"""
Initialize the Scf section
Args:
max_scf (int): Maximum number of SCF loops before terminating. Defaults to 50.
eps_scf (float): Convergence criteria for SCF loop. Defaults to 1e-6.
scf_guess: Initial guess for SCF loop.
"ATOMIC": Generate an atomic density using the atomic code
"CORE": Diagonalize the core Hamiltonian for an initial guess.
"HISTORY_RESTART": Extrapolated from previous RESTART files.
"MOPAC": Use same guess as MOPAC for semi-empirical methods or a simple diagonal density matrix for
other methods.
"NONE": Skip initial guess (only for NON-SCC DFTB).
"RANDOM": Use random wavefunction coefficients.
"RESTART": Use the RESTART file as an initial guess (and ATOMIC if not present).
"SPARSE": Generate a sparse wavefunction using the atomic code (for OT based methods).
"""
self.max_scf = max_scf
self.eps_scf = eps_scf
self.scf_guess = scf_guess
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = "Parameters needed to perform an SCF run."
keywords = {
"MAX_SCF": Keyword("MAX_SCF", max_scf),
"EPS_SCF": Keyword("EPS_SCF", eps_scf),
"SCF_GUESS": Keyword("SCF_GUESS", scf_guess), # Uses Restart file if present, and ATOMIC if not present
"MAX_ITER_LUMO": Keyword("MAX_ITER_LUMO", kwargs.get("max_iter_lumo", 400)),
}
super().__init__(
"SCF",
description=description,
keywords=keywords,
subsections=subsections,
**kwargs,
)
class Mgrid(Section):
"""
Controls the multigrid for numerical integration
"""
def __init__(
self,
cutoff: Union[int, float] = 1200,
rel_cutoff: Union[int, float] = 80,
ngrids: int = 5,
progression_factor: int = 3,
subsections: dict = None,
**kwargs,
):
"""
Initialize the MGRID section
Args:
            cutoff: Cutoff energy (in Rydbergs for historical reasons) defining how fine of Gaussians will be used
            rel_cutoff: The relative cutoff energy, which defines how to map the Gaussians onto the multigrid. If
                the value is too low then, even if you have a high cutoff with sharp Gaussians, they will be mapped
                to the coarse part of the multigrid
ngrids: number of grids to use
progression_factor: divisor that decides how to map Gaussians the multigrid after the highest mapping is
decided by rel_cutoff
"""
self.cutoff = cutoff
self.rel_cutoff = rel_cutoff
self.ngrids = ngrids
self.progression_factor = progression_factor
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = (
"Multigrid information. Multigrid allows for sharp gaussians and diffuse "
+ "gaussians to be treated on different grids, where the spacing of FFT integration "
+ "points can be tailored to the degree of sharpness/diffusiveness of the gaussians."
)
keywords = {
"CUTOFF": Keyword(
"CUTOFF",
cutoff,
description="Cutoff in [Ry] for finest level of the MG.",
),
"REL_CUTOFF": Keyword(
"REL_CUTOFF",
rel_cutoff,
description="Controls which gaussians are mapped to which level of the MG",
),
"NGRIDS": Keyword("NGRIDS", ngrids),
"PROGRESSION_FACTOR": Keyword("PROGRESSION_FACTOR", progression_factor),
}
super().__init__(
"MGRID",
description=description,
keywords=keywords,
subsections=subsections,
**kwargs,
)
class Diagonalization(Section):
"""
Controls diagonalization settings (if using traditional diagonalization).
"""
def __init__(
self,
eps_adapt: float = 0,
eps_iter: float = 1e-8,
eps_jacobi: float = 0,
jacobi_threshold: float = 1e-7,
subsections: dict = None,
**kwargs,
):
"""
        Initialize the diagonalization section
"""
self.eps_adapt = eps_adapt
self.eps_iter = eps_iter
self.eps_jacobi = eps_jacobi
self.jacobi_threshold = jacobi_threshold
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
location = "CP2K_INPUT/FORCE_EVAL/DFT/SCF/DIAGONALIZATION"
keywords = {
"EPS_ADAPT": Keyword("EPS_ADAPT", eps_adapt),
"EPS_ITER": Keyword("EPS_ITER", eps_iter),
"EPS_JACOBI": Keyword("EPS_JACOBI", eps_jacobi),
"JACOBI_THRESHOLD": Keyword("JACOBI_THRESHOLD", jacobi_threshold),
}
super().__init__(
"DIAGONALIZATION",
keywords=keywords,
repeats=False,
location=location,
subsections=self.subsections,
**kwargs,
)
class Davidson(Section):
"""
Parameters for davidson diagonalization
"""
def __init__(
self,
new_prec_each: int = 20,
preconditioner: Literal[
"FULL_ALL", "FULL_KINETIC", "FULL_SINGLE", "FULL_SINGLE_INVERSE", "FULL_S_INVERSE", "NONE"
] = "FULL_SINGLE_INVERSE",
**kwargs,
):
"""
Args:
new_prec_each (int): How often to recalculate the preconditioner.
preconditioner (str): Preconditioner to use.
"FULL_ALL": Most effective state selective preconditioner based on diagonalization, requires the
ENERGY_GAP parameter to be an underestimate of the HOMO-LUMO gap. This preconditioner is
recommended for almost all systems, except very large systems where make_preconditioner would
dominate the total computational cost.
"FULL_KINETIC": Cholesky inversion of S and T, fast construction, robust, use for very large systems.
"FULL_SINGLE": Based on H-eS diagonalisation, not as good as FULL_ALL, but somewhat cheaper to apply.
"FULL_SINGLE_INVERSE": Based on H-eS cholesky inversion, similar to FULL_SINGLE in preconditioning
efficiency but cheaper to construct, might be somewhat less robust. Recommended for large systems.
"FULL_S_INVERSE": Cholesky inversion of S, not as good as FULL_KINETIC, yet equally expensive.
"NONE": skip preconditioning
"""
self.new_prec_each = new_prec_each
self.preconditioner = preconditioner
keywords = {
"NEW_PREC_EACH": Keyword("NEW_PREC_EACH", new_prec_each),
"PRECONDITIONER": Keyword("PRECONDITIONER", preconditioner),
}
super().__init__(
"DAVIDSON",
keywords=keywords,
repeats=False,
location=None,
subsections={},
**kwargs,
)
class OrbitalTransformation(Section):
"""
Turns on the Orbital Transformation scheme for diagonalizing the Hamiltonian. Much faster and with
guaranteed convergence compared to normal diagonalization, but requires the system to have a band
gap.
NOTE: OT has poor convergence for metallic systems and cannot use SCF mixing or smearing. Therefore,
you should not use it for metals or systems with 'small' band gaps. In that case, use normal
diagonalization, which will be slower, but will converge properly.
"""
def __init__(
self,
minimizer: str = "CG",
preconditioner: str = "FULL_ALL",
algorithm: str = "STRICT",
energy_gap: float = 0.01,
linesearch: str = "2PNT",
subsections: dict = None,
**kwargs,
):
"""
Initialize the OT section
Args:
minimizer (str): The minimizer to use with the OT method. Default is conjugate gradient method,
which is more robust, but more well-behaved systems should use DIIS, which can be as much
as 50% faster.
preconditioner (str): Preconditioner to use for OT, FULL_ALL tends to be most robust, but is
not always most efficient. For difficult systems, FULL_SINGLE_INVERSE can be more robust,
and is reasonably efficient with large systems. For huge, but well behaved, systems,
where construction of the preconditioner can take a very long time, FULL_KINETIC can be a good
choice.
energy_gap (float): Guess for the band gap. For FULL_ALL, should be smaller than the actual band gap,
so simply using 0.01 is a robust value. Choosing a larger value will help if you start with a bad
initial guess though. For FULL_SINGLE_INVERSE, energy_gap is treated as a lower bound. Values lower
than 0.05 in this case can lead to stability issues.
algorithm (str): What algorithm to use for OT. 'Strict': Taylor or diagonalization based algorithm.
IRAC: Orbital Transformation based Iterative Refinement of the Approximative Congruence
transformation (OT/IR).
linesearch (str): From the manual: 1D line search algorithm to be used with the OT minimizer,
in increasing order of robustness and cost. MINIMIZER CG combined with LINESEARCH
GOLD should always find an electronic minimum. Whereas the 2PNT minimizer is almost always OK,
3PNT might be needed for systems in which successive OT CG steps do not decrease the total energy.
"""
self.minimizer = minimizer
self.preconditioner = preconditioner
self.algorithm = algorithm
self.energy_gap = energy_gap
self.linesearch = linesearch
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = (
"Sets the various options for the orbital transformation (OT) method. "
+ "Default settings already provide an efficient, yet robust method. Most "
+ "systems benefit from using the FULL_ALL preconditioner combined with a small "
+ "value (0.001) of ENERGY_GAP. Well-behaved systems might benefit from using "
+ "a DIIS minimizer. Advantages: It's fast, because no expensive diagonalization"
+ "is performed. If preconditioned correctly, method guaranteed to find minimum. "
+ "Disadvantages: Sensitive to preconditioning. A good preconditioner can be "
+ "expensive. No smearing, or advanced SCF mixing possible: POOR convergence for "
+ "metallic systems."
)
keywords = {
"MINIMIZER": Keyword("MINIMIZER", minimizer),
"PRECONDITIONER": Keyword("PRECONDITIONER", preconditioner),
"ENERGY_GAP": Keyword("ENERGY_GAP", energy_gap),
"ALGORITHM": Keyword("ALGORITHM", algorithm),
"LINESEARCH": Keyword("LINESEARCH", linesearch),
}
super().__init__(
"OT",
description=description,
keywords=keywords,
subsections=self.subsections,
**kwargs,
)
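# Hedged usage sketch (added): OT is normally attached beneath the SCF section, e.g.
#
#     scf = Scf(max_scf=30, eps_scf=1e-6)
#     scf.insert(OrbitalTransformation(minimizer="DIIS", preconditioner="FULL_SINGLE_INVERSE"))
#
# For metallic systems one would instead insert Diagonalization() (defined above) together
# with Smear() (defined below) under SCF.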
class Cell(Section):
"""
Defines the simulation cell (lattice)
"""
def __init__(self, lattice: Lattice, **kwargs):
"""
Initialize the cell section.
Args:
lattice: pymatgen lattice object
"""
self.lattice = lattice
self.kwargs = kwargs
description = "Input parameters needed to set up the CELL."
keywords = {
"A": Keyword("A", *lattice.matrix[0]),
"B": Keyword("B", *lattice.matrix[1]),
"C": Keyword("C", *lattice.matrix[2]),
}
super().__init__("CELL", description=description, keywords=keywords, subsections={}, **kwargs)
class Kind(Section):
"""
Specifies the information for the different atom types being simulated.
"""
def __init__(
self,
specie: str,
alias: Union[str, None] = None,
magnetization: float = 0.0,
subsections: dict = None,
basis_set: str = "GTH_BASIS",
potential: str = "GTH_POTENTIALS",
ghost: bool = False,
**kwargs,
):
"""
Initialize a KIND section
Args:
specie (Species or Element): Object representing the atom.
alias (str): Alias for the atom, can be used for specifying modifications
                to certain atoms but not all, e.g. Mg_1 and Mg_2 to force different
oxidation states on the two atoms.
magnetization (float): From the CP2K Manual: The magnetization used
in the atomic initial guess. Adds magnetization/2 spin-alpha
electrons and removes magnetization/2 spin-beta electrons.
basis_set (str): Basis set for this atom, accessible from the
basis set file specified
potential (str): Pseudopotential for this atom, accessible from the
potential file
kwargs: Additional kwargs to pass to Section()
"""
self.name = "KIND"
self.specie = specie
self.alias = alias
self.magnetization = magnetization
self.subsections = subsections if subsections else {}
self.basis_set = basis_set
self.potential = potential
self.ghost = ghost
self.kwargs = kwargs
self.description = "The description of the kind of the atoms (mostly for QM)"
keywords = {
"ELEMENT": Keyword("ELEMENT", specie.__str__()),
"MAGNETIZATION": Keyword("MAGNETIZATION", magnetization),
"BASIS_SET": Keyword("BASIS_SET", basis_set),
"POTENTIAL": Keyword("POTENTIAL", potential),
"GHOST": Keyword("GHOST", ghost),
}
kind_name = alias if alias else specie.__str__()
self.alias = kind_name
self.section_parameters = [kind_name]
self.location = None
self.verbose = True
self.repeats = False
super().__init__(
name=self.name,
subsections=self.subsections,
description=self.description,
keywords=keywords,
section_parameters=self.section_parameters,
alias=self.alias,
location=self.location,
verbose=self.verbose,
**self.kwargs,
)
class DftPlusU(Section):
"""
Controls DFT+U for an atom kind
"""
def __init__(
self,
eps_u_ramping=1e-5,
init_u_ramping_each_scf=False,
l=-1,
u_minus_j=0,
u_ramping=0,
):
"""
Initialize the DftPlusU section.
Args:
eps_u_ramping: (float) SCF convergence threshold at which to start ramping the U value
init_u_ramping_each_scf: (bool) Whether or not to do u_ramping each scf cycle
l: (int) angular moment of the orbital to apply the +U correction
u_minus_j: (float) the effective U parameter, Ueff = U-J
u_ramping: (float) stepwise amount to increase during ramping until u_minus_j is reached
"""
self.name = "DFT_PLUS_U"
        self.eps_u_ramping = eps_u_ramping
        self.init_u_ramping_each_scf = init_u_ramping_each_scf
self.l = l
self.u_minus_j = u_minus_j
self.u_ramping = u_ramping
keywords = {
"EPS_U_RAMPING": Keyword("EPS_U_RAMPING", eps_u_ramping),
"INIT_U_RAMPING_EACH_SCF": Keyword("INIT_U_RAMPING_EACH_SCF", init_u_ramping_each_scf),
"L": Keyword("L", l),
"U_MINUS_J": Keyword("U_MINUS_J", u_minus_j),
"U_RAMPING": Keyword("U_RAMPING", u_ramping),
}
super().__init__(
name=self.name,
subsections=None,
            description="Settings for the on-site Hubbard (DFT+U) correction for this atom kind.",
keywords=keywords,
            section_parameters=[],
alias=None,
location=None,
)
class Coord(Section):
"""
Specifies the coordinates of the atoms using a pymatgen structure object.
"""
def __init__(
self,
structure: Union[Structure, Molecule],
aliases: Union[dict, None] = None,
subsections: dict = None,
**kwargs,
):
"""
Args:
structure: Pymatgen structure object
            aliases (dict): optional mapping from an alias name to the site indices it covers, so you can do
                things like assign a unique magnetization to specific atoms of an element.
"""
self.structure = structure
self.aliases = aliases
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
description = (
"The coordinates for simple systems (like small QM cells) are specified "
+ "here by default using explicit XYZ coordinates. More complex systems "
+ "should be given via an external coordinate file in the SUBSYS%TOPOLOGY section."
)
if aliases:
keywords = {k: KeywordList([Keyword(k, *structure[i].coords) for i in aliases[k]]) for k in aliases}
else:
keywords = {
ss: KeywordList([Keyword(s.specie.symbol, *s.coords) for s in structure.sites if s.specie.symbol == ss])
for ss in structure.symbol_set
}
super().__init__(
name="COORD",
description=description,
keywords=keywords,
alias=None,
subsections=self.subsections,
**kwargs,
)
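# Hedged usage sketch (added for illustration, using the Structure/Lattice classes imported at
# the top of this module; the basis set and potential names below are illustrative only):
#
#     struct = Structure(Lattice.cubic(3.57), ["C", "C"], [[0, 0, 0], [0.25, 0.25, 0.25]])
#     subsys = Subsys(subsections={"CELL": Cell(struct.lattice), "COORD": Coord(struct)})
#     for el in struct.symbol_set:
#         subsys.insert(Kind(el, basis_set="DZVP-MOLOPT-SR-GTH", potential="GTH-PBE"))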
class PDOS(Section):
"""
Controls printing of projected density of states onto the different atom KINDS
(elemental decomposed DOS).
"""
def __init__(self, nlumo: int = -1, **kwargs):
"""
Initialize the PDOS section
Args:
nlumo: how many unoccupied orbitals to include (-1==ALL)
"""
self.nlumo = nlumo
self.kwargs = kwargs
description = "Controls printing of the projected density of states"
keywords = {
"NLUMO": Keyword("NLUMO", nlumo),
"COMPONENTS": Keyword("COMPONENTS"),
}
super().__init__("PDOS", description=description, keywords=keywords, subsections={}, **kwargs)
class LDOS(Section):
"""
    Controls printing of the LDOS (local density of states), i.e. the DOS projected onto specific atoms given by a list.
"""
def __init__(self, index: int = 1, alias: Union[str, None] = None, **kwargs):
"""
Initialize the LDOS section
Args:
index: Index of the atom to project onto
"""
self.index = index
self.alias = alias
self.kwargs = kwargs
description = "Controls printing of the projected density of states decomposed by atom type"
keywords = {"COMPONENTS": Keyword("COMPONENTS"), "LIST": Keyword("LIST", index)}
super().__init__(
"LDOS",
subsections={},
alias=alias,
description=description,
keywords=keywords,
**kwargs,
)
class V_Hartree_Cube(Section):
"""
Controls printing of the hartree potential as a cube file.
"""
def __init__(self, keywords=None, **kwargs):
"""
Initialize the V_HARTREE_CUBE section
"""
self.keywords = keywords if keywords else {}
self.kwargs = kwargs
description = (
"Controls the printing of a cube file with eletrostatic potential generated by "
+ "the total density (electrons+ions). It is valid only for QS with GPW formalism. "
+ "Note that by convention the potential has opposite sign than the expected physical one."
)
super().__init__(
"V_HARTREE_CUBE",
subsections={},
description=description,
keywords=keywords,
**kwargs,
)
class MO_Cubes(Section):
"""
Controls printing of the molecular orbital eigenvalues
"""
def __init__(self, write_cube: bool = False, nhomo: int = 1, nlumo: int = 1, **kwargs):
"""
Initialize the MO_CUBES section
"""
self.write_cube = write_cube
self.nhomo = nhomo
self.nlumo = nlumo
self.kwargs = kwargs
        description = (
            "Controls the printing of cube files for the molecular orbitals and of the "
            + "corresponding MO eigenvalues."
        )
keywords = {
"WRITE_CUBES": Keyword("WRITE_CUBE", write_cube),
"NHOMO": Keyword("NHOMO", nhomo),
"NLUMO": Keyword("NLUMO", nlumo),
}
super().__init__(
"MO_CUBES",
subsections={},
description=description,
keywords=keywords,
**kwargs,
)
class E_Density_Cube(Section):
"""
Controls printing of the electron density cube file
"""
def __init__(self, **kwargs):
"""
Initialize the E_DENSITY_CUBE Section
"""
self.kwargs = kwargs
description = (
"Controls the printing of cube files with the electronic density and, for LSD "
+ "calculations, the spin density."
)
super().__init__(
"E_DENSITY_CUBE",
subsections={},
description=description,
keywords={},
**kwargs,
)
class Smear(Section):
"""
Control electron smearing
"""
def __init__(
self,
elec_temp: Union[int, float] = 300,
method: str = "FERMI_DIRAC",
fixed_magnetic_moment: float = -1e2,
**kwargs,
):
"""
Initialize the SMEAR section
"""
self.elec_temp = elec_temp
self.method = method
self.fixed_magnetic_moment = fixed_magnetic_moment
self.kwargs = kwargs
description = "Activates smearing of electron occupations"
keywords = {
"ELEC_TEMP": Keyword("ELEC_TEMP", elec_temp),
"METHOD": Keyword("METHOD", method),
"FIXED_MAGNETIC_MOMENT": Keyword("FIXED_MAGNETIC_MOMENT", fixed_magnetic_moment),
}
super().__init__(
"SMEAR",
description=description,
keywords=keywords,
subsections={},
**kwargs,
)
class BrokenSymmetry(Section):
"""
Define the required atomic orbital occupation assigned in initialization
of the density matrix, by adding or subtracting electrons from specific
angular momentum channels. It works only with GUESS ATOMIC
"""
def __init__(
self,
l_alpha: int = -1,
n_alpha: int = 0,
nel_alpha: int = -1,
l_beta: int = -1,
n_beta: int = 0,
nel_beta: int = -1,
):
"""
Initialize the broken symmetry section
Args:
l_alpha: Angular momentum quantum number of the orbitals whose occupation is changed
n_alpha: Principal quantum number of the orbitals whose occupation is changed.
Default is the first not occupied
nel_alpha: Orbital occupation change per angular momentum quantum number. In
unrestricted calculations applied to spin alpha
l_beta: Same as L_alpha for beta channel
n_beta: Same as N_alpha for beta channel
nel_beta: Same as NEL_alpha for beta channel
"""
self.l_alpha = l_alpha
self.n_alpha = n_alpha
self.nel_alpha = nel_alpha
self.l_beta = l_beta
self.n_beta = n_beta
self.nel_beta = nel_beta
description = (
"Define the required atomic orbital occupation assigned in initialization "
+ "of the density matrix, by adding or subtracting electrons from specific "
+ "angular momentum channels. It works only with GUESS ATOMIC"
)
keywords_alpha = {
"L": Keyword("L", l_alpha),
"N": Keyword("N", n_alpha),
"NEL": Keyword("NEL", nel_alpha),
}
alpha = Section("ALPHA", keywords=keywords_alpha, subsections={}, repeats=False)
keywords_beta = {
"L": Keyword("L", l_beta),
"N": Keyword("N", n_beta),
"NEL": Keyword("NEL", nel_beta),
}
beta = Section("BETA", keywords=keywords_beta, subsections={}, repeats=False)
super().__init__(
"BS",
description=description,
subsections={"ALPHA": alpha, "BETA": beta},
keywords={},
repeats=False,
)
class XC_FUNCTIONAL(Section):
"""
Defines the XC functional to use
"""
def __init__(self, functional: str, subsections: dict = None, **kwargs):
"""
Initialize the XC_FUNCTIONAL class
"""
self.functional = functional
self.subsections = subsections if subsections else {}
self.kwargs = kwargs
location = "CP2K_INPUT/FORCE_EVAL/DFT/XC/XC_FUNCTIONAL"
built_in = [
"BL3YLP",
"BEEFVDW",
"BLYP",
"BP",
"HCTH120",
"LDA",
"NONE",
"NO_SHORTCUT",
"OLYP",
"PADE",
"PBE",
"PBE0",
"TPSS",
]
if functional in built_in:
section_params = [functional]
elif functional.upper() in ["PBESOL", "REVPBE"]:
section_params = ["PBE"]
self.subsections["PBE"] = Section(
"PBE",
keywords={"PARAMETERIZATION": Keyword("PARAMETERIZATION", functional)},
)
else:
section_params = []
warnings.warn(
"The selected functional has no short-cut in CP2K. "
"You must specify subsection to define this functional."
)
super().__init__(
"XC_FUNCTIONAL",
subsections=self.subsections,
location=location,
repeats=False,
section_parameters=section_params,
**kwargs,
)
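# Usage sketch (illustrative, not part of the original pymatgen module):
# built-in functionals are passed through as section parameters, PBEsol and
# revPBE are expressed via a PBE subsection carrying the chosen
# parameterization, and anything else requires explicit subsections.
#
#     xc_pbe = XC_FUNCTIONAL("PBE")        # section parameter ["PBE"]
#     xc_sol = XC_FUNCTIONAL("PBEsol")     # adds a &PBE subsection internally
#     xc_other = XC_FUNCTIONAL("MYFUNC", subsections={...})  # hypothetical name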
class PBE(Section):
"""
Info about the PBE functional.
"""
def __init__(
self,
parameterization: str = "ORIG",
scale_c: Union[float, int] = 1,
scale_x: Union[float, int] = 1,
):
"""
Args:
parameterization (str):
ORIG: original PBE
PBESOL: PBE for solids/surfaces
REVPBE: revised PBE
scale_c (float): scales the correlation part of the functional.
scale_x (float): scales the exchange part of the functional.
"""
self.parameterization = parameterization
self.scale_c = scale_c
self.scale_x = scale_x
location = "CP2K_INPUT/FORCE_EVAL/DFT/XC/XC_FUNCTIONAL/PBE"
keywords = {
"PARAMETRIZATION": Keyword("PARAMETRIZATION", parameterization),
"SCALE_C": Keyword("SCALE_C", scale_c),
"SCALE_X": Keyword("SCALE_X", scale_x),
}
super().__init__(
"PBE",
subsections={},
repeats=False,
location=location,
section_parameters=[],
keywords=keywords,
)
class Kpoints(Section):
"""
Description of the k-points to use for the calculation.
"""
def __init__(
self,
kpts: Union[Sequence, Sequence[Sequence[int]]],
weights: Union[Sequence, None] = None,
eps_geo: float = 1e-6,
full_grid: bool = False,
parallel_group_size: int = -1,
scheme: str = "MONKHORST-PACK",
symmetry: bool = False,
units: str = "B_VECTOR",
verbose: bool = False,
wavefunctions: str = "COMPLEX",
):
"""
Args:
kpts (list, tuple): a 2D array for the kpoints of the form
                [(1, 1, 1)]. If len(kpts) == 1, it is taken as the subdivisions
                for an automatic kpoint scheme. If it has more entries, they are
                taken as explicit kpoint entries.
weights (list, tuple): a weight for each kpoint. Default is to
weigh each by 1
eps_geo (float): tolerance for symmetry. Default=1e-6
full_grid (bool): use full (not reduced) kpoint grid. Default=False.
parallel_group_size (int): from cp2k manual: Number of processors
to be used for a single kpoint. This number must divide the
total number of processes. The number of groups must divide
the total number of kpoints. Value=-1 (smallest possible
number of processes per group, satisfying the constraints).
Value=0 (all processes). Value=n (exactly n processes).
Default=-1.
scheme (str): kpoint generation scheme. Default='Monkhorst-Pack'
symmetry (bool): Use symmetry to reduce the number of kpoints.
Default=False.
units (str): Units for the kpoint coordinates (reciprocal coordinates
or cartesian). Default='B_VECTOR' (reciprocal)
verbose (bool): verbose output for kpoints. Default=False
wavefunctions (str): Whether to use complex or real valued wavefunctions
(if available). Default='complex'
"""
description = "Sets up the kpoints"
keywords = {}
self.kpts = kpts
self.weights = weights if weights else [1] * len(kpts)
assert len(self.kpts) == len(self.weights)
self.eps_geo = eps_geo
self.full_grid = full_grid
self.parallel_group_size = parallel_group_size
self.scheme = scheme
self.symmetry = symmetry
self.units = units
self.verbose = verbose
self.wavefunctions = wavefunctions
if len(kpts) == 1:
keywords["SCHEME"] = Keyword("SCHEME", scheme, *kpts[0])
elif len(kpts) > 1:
keywords["KPOINT"] = KeywordList([Keyword("KPOINT", *k, w) for k, w in zip(self.kpts, self.weights)])
else:
raise ValueError("No k-points provided!")
keywords.update(
{
"EPS_GEO": Keyword("EPS_GEO", eps_geo),
"FULL_GRID": Keyword("FULL_GRID", full_grid),
"PARALLEL_GROUP_SIZE": Keyword("PARALLEL_GROUP_SIZE", parallel_group_size),
"SYMMETRY": Keyword("SYMMETRY", symmetry),
"UNITS": Keyword("UNITS", units),
"VERBOSE": Keyword("VERBOSE", verbose),
"WAVEFUNCTIONS": Keyword("WAVEFUNCTIONS", wavefunctions),
}
)
super().__init__(
name="KPOINTS",
subsections=None,
repeats=False,
description=description,
keywords=keywords,
)
@classmethod
def from_kpoints(cls, kpoints):
"""
Initialize the section from a Kpoints object (pymatgen.io.vasp.inputs).
Args:
kpoints: A pymatgen kpoints object.
"""
k = kpoints.as_dict()
kpoints = k["kpoints"]
weights = k["kpts_weights"]
scheme = k["generation_style"]
if scheme.lower() == "Monkhorst":
scheme = "MONKHORST-PACK"
units = k["coord_type"]
if k["coord_type"]:
if k["coord_type"].lower() == "reciprocal":
units = "B_VECTOR"
elif k["coord_type"].lower() == "cartesian":
units = "CART_ANGSTROM"
else:
units = "B_VECTOR"
return Kpoints(kpts=kpoints, weights=weights, scheme=scheme, units=units)
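# Usage sketch (illustrative, not part of the original pymatgen module):
# a single entry in ``kpts`` is emitted as an automatic SCHEME line, while
# several entries become explicit KPOINT lines with their weights;
# ``from_kpoints`` converts a pymatgen Kpoints object directly.
#
#     kp_grid = Kpoints(kpts=[(4, 4, 4)])                  # SCHEME MONKHORST-PACK 4 4 4
#     kp_list = Kpoints(kpts=[(0, 0, 0), (0.5, 0.5, 0.5)],
#                       weights=[1, 3])                    # explicit KPOINT lines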
|
materialsproject/pymatgen
|
pymatgen/io/cp2k/inputs.py
|
Python
|
mit
| 64,419
|
[
"CP2K",
"Gaussian",
"MOPAC",
"VASP",
"pymatgen"
] |
0a81b487b47e06e71d16df4642d4e0491dfaaef41cef6029d214fdd4fb8c31fe
|
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are
noted.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings when an error
occurs. By default this is disabled; to enable it use `errprint`.
.. autosummary::
:toctree: generated/
errprint -- Set or return the error printing flag for special functions.
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1
ellipkinc -- Incomplete elliptic integral of the first kind
ellipe -- Complete elliptic integral of the second kind
ellipeinc -- Incomplete elliptic integral of the second kind
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and complex argument.
jn -- Bessel function of the first kind of real order and complex argument
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and real argument.
yv -- Bessel function of the second kind of real order and complex argument.
yve -- Exponentially scaled Bessel function of the second kind of real order.
kn -- Modified Bessel function of the second kind of integer order `n`
kv -- Modified Bessel function of the second kind of real order `v`
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the first kind
hankel1 -- Hankel function of the first kind
hankel1e -- Exponentially scaled Hankel function of the first kind
hankel2 -- Hankel function of the second kind
hankel2e -- Exponentially scaled Hankel function of the second kind
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0
it2j0y0 -- Integrals related to Bessel functions of order 0
iti0k0 -- Integrals of modified Bessel functions of order 0
it2i0k0 -- Integrals related to modified Bessel functions of order 0
besselpoly -- [+]Weighted integral of a Bessel function.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- [+]Compute Ricatti-Bessel function of the second kind and its derivative.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
   btdtr -- Cumulative distribution function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
   btdtrib -- Inverse of `btdtr` with respect to `b`.
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd
   gdtr -- Gamma distribution cumulative distribution function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function
pdtrc -- Poisson survival function
pdtri -- Inverse to `pdtr` vs m
pdtrik -- Inverse to `pdtr` vs k
   stdtr -- Student t distribution cumulative distribution function
stdtridf -- Inverse of `stdtr` vs df
stdtrit -- Inverse of `stdtr` vs `t`
chdtr -- Chi square cumulative distribution function
chdtrc -- Chi square survival function
chdtri -- Inverse to `chdtrc`
chdtriv -- Inverse to `chdtr` vs `v`
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x
chndtr -- Non-central chi square cumulative distribution function
chndtridf -- Inverse to `chndtr` vs `df`
chndtrinc -- Inverse to `chndtr` vs `nc`
chndtrix -- Inverse to `chndtr` vs `x`
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function
smirnovi -- Inverse to `smirnov`
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution
kolmogi -- Inverse function to kolmogorov
tklmbda -- Tukey-Lambda cumulative distribution function
logit -- Logit ufunc for ndarrays.
expit -- Expit ufunc for ndarrays.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
   rgamma -- Reciprocal of the gamma function.
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
   digamma -- The digamma function, an alias for `psi`.
poch -- Rising factorial (z)_m
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals
modfresnelm -- Modified Fresnel negative integrals
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z).
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre function of the first kind for complex arguments.
lpn -- [+]Legendre function of the first kind.
lqn -- [+]Legendre function of the second kind.
lpmn -- [+]Sequence of associated Legendre functions of the first kind.
lqmn -- [+]Sequence of associated Legendre functions of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l)
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l)
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly to :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial.
chebyt -- [+]Chebyshev polynomial of the first kind.
chebyu -- [+]Chebyshev polynomial of the second kind.
chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- [+]Jacobi polynomial.
laguerre -- [+]Laguerre polynomial.
genlaguerre -- [+]Generalized (associated) Laguerre polynomial.
hermite -- [+]Physicist's Hermite polynomial.
hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial.
gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial.
sh_legendre -- [+]Shifted Legendre polynomial.
sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- [+]Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
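    For instance, prefer ``eval_legendre(50, x)`` over building ``legendre(50)``
    and evaluating the resulting coefficient form at ``x``.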
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x)
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind
hyp0f1 -- Confluent hypergeometric limit function 0F1.
hyp2f0 -- Hypergeometric function 2F0 in y and an error estimate
hyp1f2 -- Hypergeometric function 1F2 and error estimate
hyp3f0 -- Hypergeometric function 3F0 in y and an error estimate
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D
pbvv -- Parabolic cylinder function V
pbwa -- Parabolic cylinder function W
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions
mathieu_b -- Characteristic value of odd Mathieu functions
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
   mathieu_odd_coef -- [+]Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative
mathieu_sem -- Odd Mathieu function and its derivative
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function
obl_cv -- Characteristic value of oblate spheroidal function
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers
kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei
berp -- Derivative of the Kelvin function `ber`
beip -- Derivative of the Kelvin function `bei`
ker -- Kelvin function ker
   kei -- Kelvin function kei
kerp -- Derivative of the Kelvin function ker
keip -- Derivative of the Kelvin function kei
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]The number of combinations of N things taken k at a time.
perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and Related Functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n
exp1 -- Exponential integral E_1 of complex argument z
expi -- Exponential integral Ei
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- [+]Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`
exp10 -- 10**x
exp2 -- 2**x
radian -- Convert from degrees to radians
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero
expm1 -- exp(x) - 1 for use when `x` is near zero.
cosm1 -- cos(x) - 1 for use when `x` is near zero.
round -- Round to nearest integer
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
from ._ufuncs import *
from .basic import *
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
spherical_kn)
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
register_func('i0',i0)
del register_func
from numpy.testing import Tester
test = Tester().test
|
bkendzior/scipy
|
scipy/special/__init__.py
|
Python
|
bsd-3-clause
| 26,955
|
[
"Gaussian"
] |
7b7126c6b6f82b7969121dbd5c2dff424ff6a7600ab4943707a0c24016362d35
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from txtalert.core.models import (Patient, Clinic, MSISDN,
VISIT_STATUS_CHOICES, Visit)
from optparse import make_option
from django.utils import timezone
from datetime import datetime, timedelta
import random
import sys
from uuid import uuid1
def sample(items):
return random.sample(items, 1).pop()
NAMES = ['Aaliyah', 'Abayomi', 'Abebe', 'Abebi', 'Abena', 'Abeo', 'Ada',
'Adah', 'Adana', 'Adanna', 'Adanya', 'Akili', 'Alika', 'Ama',
'Amadi', 'Amai', 'Amare', 'Amari', 'Abayomi', 'Abiola', 'Abu',
'Ade', 'Adeben', 'Adiel', 'Amarey', 'Amari', 'Aren', 'Azibo',
'Bobo', 'Chiamaka', 'Chibale', 'Chidi', 'Chike', 'Dakarai',
'Davu', 'Deion', 'Dembe', 'Diallo']
SURNAMES = ['Azikiwe','Awolowo','Bello','Balewa','Akintola','Okotie-Eboh',
'Nzeogwu','Onwuatuegwu','Okafor','Okereke','Okeke','Okonkwo',
'Okoye','Okorie','Obasanjo','Babangida','Buhari','Dimka','Okar',
'Diya','Odili','Ibori','Igbinedion','Alamieyeseigha','Yar\'Adua',
'Asari-Dokubo','Jomo-Gbomo','Anikulapo-Kuti','Iwu','Anenih',
'Bamgboshe','Biobaku','Tinibu','Akinjide','Akinyemi','Akiloye',
'Adeyemi','Adesida','Omehia','Sekibo','Amaechi','Bankole','Nnamani',
'Ayim','Okadigbo','Ironsi','Ojukwu','Danjuma','Effiong','Akpabio',
'Attah','Chukwumereije','Akunyili','Iweala','Okonjo','Ezekwesili',
'Achebe','Soyinka','Solarin','Gbadamosi','Olanrewaju','Magoro',
'Madaki','Jang','Oyinlola','Oyenusi','Onyejekwe','Onwudiwe',
'Jakande','Kalejaiye','Igwe','Eze','Obi','Ngige','Uba','Kalu',
'Orji','Ohakim','Egwu','Adesina','Adeoye','Falana','Fagbure',
'Jaja','Okilo','Okiro','Balogun','Alakija','Akenzua','Akerele',
'Ademola','Onobanjo','Aguda','Okpara','Mbanefo','Mbadinuju','Boro',
'Ekwensi','Gowon', 'Saro-Wiwa']
class Command(BaseCommand):
help = "Generate sample data to populate a new installation " \
"of txtAlert:bookings for demo purposes"
option_list = BaseCommand.option_list + (
make_option('--owner', dest='owner', help='Who should own these patients?'),
make_option('--patients', default=50, dest='patients',
help='How many patients to create'),
make_option('--visits', default=100, dest='visits',
help='How many visits to create per patient'),
make_option('--change-requests', default=1, dest='change_requests',
help='How many change requests to create per visit')
)
def handle(self, *args, **options):
if options.get('owner'):
self.owner = User.objects.get(username=options['owner'])
else:
print 'Please provide --owner=<username>'
sys.exit(1)
self.clinics = Clinic.objects.all()
for patient in self.create_patients(int(options['patients'])):
visits = list(self.create_visits(patient, int(options['visits'])))
list(self.create_change_requests(visits, int(options['change_requests'])))
def create_patients(self, limit):
for i in range(limit):
msisdn, _ = MSISDN.objects.get_or_create(msisdn=(27761000000 + i))
print 'patient %s of %s' % (i,limit)
yield Patient.objects.create(
name = sample(NAMES),
surname = sample(SURNAMES),
owner = self.owner,
te_id = 'bookings-%s' % Patient.objects.count(),
active_msisdn = msisdn,
regiment = sample(Patient.REGIMENT_CHOICES)[0],
sex = sample(['m','f']),
age = i,
last_clinic = sample(self.clinics),
)
def create_visits(self, patient, limit):
for i in range(limit):
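            # Visit dates start ten days in the future and step back one day
            # per iteration; dates already in the past get a random status,
            # while future visits keep the status 's' (presumably scheduled).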
date = timezone.now() + timedelta(days=((-1 * i)+10))
if date < timezone.now():
status = sample(VISIT_STATUS_CHOICES)[0]
else:
status = 's'
yield patient.visit_set.create(
te_visit_id = 'visit-%s' % Visit.objects.count(),
date = date,
status = status,
visit_type = sample(Visit.VISIT_TYPES)[0],
clinic = patient.last_clinic
)
def create_change_requests(self, visits, limit):
visits = list(visits)
for i in range(limit):
request = sample(['earlier','later'])
visit = visits.pop()
fn = getattr(visit, 'reschedule_%s' % request)
yield fn()
|
praekelt/txtalert
|
txtalert/apps/bookings/management/commands/create_bookings_sample_data.py
|
Python
|
gpl-3.0
| 4,735
|
[
"VisIt"
] |
78fc9e34f1e2f8072abd1127695452f7b3aab3ade95675571e93879b6b7abaa2
|
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: [email protected]
# Date: 2015-04-30
import rpc_server
import thread
import socket
import time
import threading
from galaxy import agent_pb2
from galaxy import master_pb2
from galaxy import task_pb2
from sofa.pbrpc import client
#mock agent
class AgentImpl(agent_pb2.Agent):
def __init__(self,cpu,mem,port,master_addr):
self._cpu = cpu
self._mem = mem
self._master_addr = master_addr
self._mutex = thread.allocate_lock()
self._channel = client.Channel(master_addr)
self._version = 0
self._my_addr = "%s:%d"%(socket.gethostname(),port)
self._agent_id = -1
self._task_status = {}
def HeartBeat(self):
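        # Once a second, report this mock agent's advertised cpu/mem shares
        # and the status of every task it pretends to run to the master over
        # the sofa-pbrpc channel; the reply carries the assigned agent id and
        # the master's current schedule version.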
while True:
with self._mutex:
master = master_pb2.Master_Stub(self._channel)
controller = client.Controller()
controller.SetTimeout(100)
req = master_pb2.HeartBeatRequest()
req.cpu_share = self._cpu
req.mem_share = self._mem
req.version = self._version
req.agent_addr = self._my_addr
status_list = []
for key in self._task_status:
print "running task %s "%key
status_list.append(self._task_status[key])
req.task_status.extend(status_list)
response = master.HeartBeat(controller,req)
self._agent_id = response.agent_id
self._version = response.version
print "heart beat version %s agent %s"%(self._version,self._agent_id)
time.sleep(1)
def RunTask(self,ctrl,req,done):
with self._mutex:
print "run task %d"%req.task_id
response = agent_pb2.RunTaskResponse()
response.status = 0
status = task_pb2.TaskStatus()
status.task_id = req.task_id
status.cpu_usage = 0
status.status = 2
status.memory_usage = 0
self._task_status[status.task_id] = status
return response
def KillTask(self,ctrl,req,done):
with self._mutex:
del self._task_status[req.task_id]
response = agent_pb2.KillTaskResponse()
response.status = 0
print "kill task %d"%req.task_id
return response
if __name__ == "__main__":
agent = AgentImpl(5,1024*1024*1024*10,9527,"localhost:8102")
heartbeat_t = threading.Thread(target=agent.HeartBeat)
heartbeat_t.daemon = True
heartbeat_t.start()
server = rpc_server.RpcServer(9527,host="0.0.0.0")
try:
server.add_service(agent)
server.start()
except KeyboardInterrupt:
server.stop()
|
fxsjy/galaxy
|
test/integeration/agent.py
|
Python
|
bsd-3-clause
| 2,935
|
[
"Galaxy"
] |
bba4d5ad501ce7acf48676f80bdbc6043699375918a993b8563954ec98e9a23d
|
from __future__ import print_function, division
import copy
from collections import defaultdict
from sympy.core.containers import Dict
from sympy.core.compatibility import is_sequence, as_int
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .matrices import MatrixBase, ShapeError, a2idx
from .dense import Matrix
import collections
class SparseMatrix(MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
See Also
========
sympy.matrices.dense.Matrix
"""
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], SparseMatrix):
self.rows = args[0].rows
self.cols = args[0].cols
self._smat = dict(args[0]._smat)
return
self._smat = {}
if len(args) == 3:
self.rows = as_int(args[0])
self.cols = as_int(args[1])
if isinstance(args[2], collections.Callable):
op = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(op(i, j))
if value:
self._smat[(i, j)] = value
elif isinstance(args[2], (dict, Dict)):
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
v = args[2][key]
if v:
self._smat[key] = v
elif is_sequence(args[2]):
if len(args[2]) != self.rows*self.cols:
raise ValueError(
'List length (%s) != rows*columns (%s)' %
(len(args[2]), self.rows*self.cols))
flat_list = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(flat_list[i*self.cols + j])
if value:
self._smat[(i, j)] = value
else:
# handle full matrix forms with _handle_creation_inputs
r, c, _list = Matrix._handle_creation_inputs(*args)
self.rows = r
self.cols = c
for i in range(self.rows):
for j in range(self.cols):
value = _list[self.cols*i + j]
if value:
self._smat[(i, j)] = value
def __getitem__(self, key):
if type(key) is tuple:
i, j = key
if isinstance(i, int) and isinstance(j, int):
i, j = self.key2ij(key)
rv = self._smat.get((i, j), S.Zero)
return rv
elif isinstance(i, slice) or isinstance(j, slice):
return self.submatrix(key)
# check for single arg, like M[:] or M[3]
if isinstance(key, slice):
lo, hi = key.indices(len(self))[:2]
L = []
for i in range(lo, hi):
m, n = divmod(i, self.cols)
L.append(self._smat.get((m, n), S.Zero))
return L
i, j = divmod(a2idx(key, len(self)), self.cols)
return self._smat.get((i, j), S.Zero)
def __setitem__(self, key, value):
raise NotImplementedError()
def copy(self):
return self._new(self.rows, self.cols, self._smat)
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self) == self.rows
def tolist(self):
"""Convert this sparse matrix into a list of nested Python lists.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.tolist()
[[1, 2], [3, 4]]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> SparseMatrix(ones(0, 3)).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
I, J = self.shape
return [[self[i, j] for j in range(J)] for i in range(I)]
def row(self, i):
"""Returns column i from self as a row vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.row(0)
Matrix([[1, 2]])
See Also
========
col
row_list
"""
return self[i,:]
def col(self, j):
"""Returns column j from self as a column vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.col(0)
Matrix([
[1],
[3]])
See Also
========
row
col_list
"""
return self[:, j]
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
row_op
col_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(k))]
RL = property(row_list, None, None, "Alternate faster representation")
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
col_op
row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
CL = property(col_list, None, None, "Alternate faster representation")
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = S.Zero
for i in range(self.cols):
trace += self._smat.get((i, i), 0)
return trace
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
tran = self.zeros(self.cols, self.rows)
for key, value in self._smat.items():
key = key[1], key[0] # reverse
tran._smat[key] = value
return tran
def _eval_conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
conj = self.copy()
for key, value in self._smat.items():
conj._smat[key] = value.conjugate()
return conj
def multiply(self, other):
"""Fast multiplication exploiting the sparsity of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
>>> A.multiply(B) == 3*ones(4)
True
See Also
========
add
"""
A = self
B = other
# sort B's row_list into list of rows
Blist = [[] for i in range(B.rows)]
for i, j, v in B.row_list():
Blist[i].append((j, v))
Cdict = defaultdict(int)
for k, j, Akj in A.row_list():
for n, Bjn in Blist[j]:
temp = Akj*Bjn
Cdict[k, n] += temp
rv = self.zeros(A.rows, B.cols)
rv._smat = dict([(k, v) for k, v in Cdict.items() if v])
return rv
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
v = scalar*self._smat[i]
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def __mul__(self, other):
"""Multiply self and other, watching for non-matrix entities.
        When multiplying by a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, zeros
>>> I = SparseMatrix(eye(3))
>>> I*I == I
True
>>> Z = zeros(3)
>>> I*Z
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> I*2 == 2*I
True
"""
if isinstance(other, SparseMatrix):
return self.multiply(other)
if isinstance(other, MatrixBase):
return other._new(self*self._new(other))
return self.scalar_multiply(other)
def __rmul__(self, other):
"""Return product the same type as other (if a Matrix).
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import Matrix, SparseMatrix
>>> A = Matrix(2, 2, range(1, 5))
>>> S = SparseMatrix(2, 2, range(2, 6))
>>> A*S == S*A
False
>>> (isinstance(A*S, SparseMatrix) ==
... isinstance(S*A, SparseMatrix) == False)
True
"""
if isinstance(other, MatrixBase):
return other*other._new(self)
return self.scalar_multiply(other)
def __add__(self, other):
"""Add other to self, efficiently if possible.
When adding a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> A = SparseMatrix(eye(3)) + SparseMatrix(eye(3))
>>> B = SparseMatrix(eye(3)) + eye(3)
>>> A
Matrix([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2]])
>>> A == B
True
>>> isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix)
False
"""
if isinstance(other, SparseMatrix):
return self.add(other)
elif isinstance(other, MatrixBase):
return other._new(other + self)
else:
raise NotImplementedError(
"Cannot add %s to %s" %
tuple([c.__class__.__name__ for c in (other, self)]))
def __neg__(self):
"""Negate all elements of self.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> -SparseMatrix(eye(3))
Matrix([
[-1, 0, 0],
[ 0, -1, 0],
[ 0, 0, -1]])
"""
rv = self.copy()
for k, v in rv._smat.items():
rv._smat[k] = -v
return rv
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, ones
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrix):
raise ValueError('only use add with %s, not %s' %
tuple([c.__class__.__name__ for c in (self, other)]))
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def submatrix(self, keys):
rlo, rhi, clo, chi = self.key2bounds(keys)
r, c = rhi - rlo, chi - clo
if r*c < len(self._smat):
# the subregion is smaller than the number of elements in self
if r == 1:
getter = lambda i, j: self[rlo, j + clo]
elif c == 1:
getter = lambda i, j: self[i + rlo, clo]
else:
getter = lambda i, j: self[i + rlo, j + clo]
return self._new(r, c, getter)
else:
# the number of non-zero elements is smaller than the subregion
smat = {}
for rk, ck in self._smat:
if rlo <= rk < rhi and clo <= ck < chi:
smat[(rk-rlo, ck-clo)] = self._smat[(rk, ck)]
return self._new(r, c, smat)
def is_symmetric(self, simplify=True):
"""Return True if self is symmetric.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> M = SparseMatrix(eye(3))
>>> M.is_symmetric()
True
>>> M[0, 2] = 1
>>> M.is_symmetric()
False
"""
if simplify:
return all((k[1], k[0]) in self._smat and
not (self[k] - self[(k[1], k[0])]).simplify()
for k in self._smat)
else:
return all((k[1], k[0]) in self._smat and
self[k] == self[(k[1], k[0])] for k in self._smat)
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def reshape(self, rows, cols):
"""Reshape matrix while retaining original size.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix(4, 2, range(8))
>>> S.reshape(2, 4)
Matrix([
[0, 1, 2, 3],
[4, 5, 6, 7]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
smat = {}
for k, v in self._smat.items():
i, j = k
n = i*self.cols + j
ii, jj = divmod(n, cols)
smat[(ii, jj)] = self._smat[(i, j)]
return self._new(rows, cols, smat)
def liupc(self):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582,
downloaded from http://tinyurl.com/9o2jsxj
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(self.rows)]
for r, c, _ in self.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*self.rows
virtual = [inf]*self.rows
for r in range(self.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def row_structure_symbolic_cholesky(self):
"""Symbolic cholesky factorization, for pre-determination of the
        non-zero structure of the Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582,
downloaded from http://tinyurl.com/9o2jsxj
"""
R, parent = self.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
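        # For each row, follow the elimination-tree parents of every
        # below-diagonal non-zero recorded in R; every ancestor reached
        # before the diagonal is predicted fill-in for that row.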
for k in range(self.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def _cholesky_sparse(self):
"""Algorithm for numeric Cholesky factorization of a sparse matrix."""
Crowstruc = self.row_structure_symbolic_cholesky()
C = self.zeros(self.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = self[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] -= summ
C[i, j] /= C[j, j]
else:
C[j, j] = self[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
summ += C[j, k]**2
else:
break
C[j, j] -= summ
C[j, j] = sqrt(C[j, j])
return C
def _LDL_sparse(self):
"""Algorithm for numeric LDL factization, exploiting sparse structure.
"""
Lrowstruc = self.row_structure_symbolic_cholesky()
L = self.eye(self.rows)
D = self.zeros(self.rows, self.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = self[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] -= summ
L[i, j] /= D[j, j]
elif i == j:
D[i, i] = self[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] -= summ
return L, D
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _diagonal_solve(self, rhs):
"Diagonal solve."
return self._new(self.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def _cholesky_solve(self, rhs):
        # for speed reasons this check is left commented out, but if you are
        # having difficulties, try uncommenting it to make sure that the
        # input matrix is symmetric
#assert self.is_symmetric()
L = self._cholesky_sparse()
Y = L._lower_triangular_solve(rhs)
rv = L.T._upper_triangular_solve(Y)
return rv
def _LDL_solve(self, rhs):
        # for speed reasons this check is left commented out, but if you are
        # having difficulties, try uncommenting it to make sure that the
        # input matrix is symmetric
#assert self.is_symmetric()
L, D = self._LDL_sparse()
Z = L._lower_triangular_solve(rhs)
Y = D._diagonal_solve(Z)
return L.T._upper_triangular_solve(Y)
def cholesky(self):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('Cholesky decomposition applies only to '
'symmetric matrices.')
M = self.as_mutable()._cholesky_sparse()
if M.has(nan) or M.has(oo):
raise ValueError('Cholesky decomposition applies only to '
'positive-definite matrices')
return self._new(M)
def LDLdecomposition(self):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('LDL decomposition applies only to '
'symmetric matrices.')
L, D = self.as_mutable()._LDL_sparse()
if L.has(nan) or L.has(oo) or D.has(nan) or D.has(oo):
raise ValueError('LDL decomposition applies only to '
'positive-definite matrices')
return self._new(L), self._new(D)
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
        By default the LDL routine is used (method='LDL'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each line of S represent coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using Cholesky or LDL (default)
decomposition as selected with the ``method`` keyword: 'CH' or 'LDL',
respectively.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
sym = self.is_symmetric()
M = self.as_mutable()
I = M.eye(M.rows)
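        # Cholesky/LDL require a symmetric matrix, so a non-symmetric input
        # is replaced by the normal equations M.T*M with right-hand side
        # M.T*I; the first row of the original matrix is saved to rescale
        # the assembled inverse below.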
if not sym:
t = M.T
r1 = M[0, :]
M = t*M
I = t*I
method = kwargs.get('method', 'LDL')
if method in "LDL":
solve = M._LDL_solve
elif method == "CH":
solve = M._cholesky_solve
else:
raise NotImplementedError(
'Method may be "CH" or "LDL", not %s.' % method)
rv = M.hstack(*[solve(I[:, i]) for i in range(I.cols)])
if not sym:
scale = (r1*rv[:, 0])[0, 0]
rv /= scale
return self._new(rv)
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, SparseMatrix):
return self._smat == other._smat
elif isinstance(other, MatrixBase):
return self._smat == MutableSparseMatrix(other)._smat
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self._smat)
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
if is_sequence(r):
SymPyDeprecationWarning(
feature="The syntax zeros([%i, %i])" % tuple(r),
useinstead="zeros(%i, %i)." % tuple(r),
issue=3381, deprecated_since_version="0.7.2",
).warn()
r, c = r
else:
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls(r, c, {})
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, dict([((i, i), S.One) for i in range(n)]))
class MutableSparseMatrix(SparseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args)
def as_mutable(self):
return self.copy()
def __setitem__(self, key, value):
"""Assign value to position designated by key.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> M = SparseMatrix(2, 2, {})
>>> M[1] = 1; M
Matrix([
[0, 1],
[0, 0]])
>>> M[1, 1] = 2; M
Matrix([
[0, 1],
[0, 2]])
>>> M = SparseMatrix(2, 2, {})
>>> M[:, 1] = [1, 1]; M
Matrix([
[0, 1],
[0, 1]])
>>> M = SparseMatrix(2, 2, {})
>>> M[1, :] = [[1, 1]]; M
Matrix([
[0, 0],
[1, 1]])
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = SparseMatrix(4, 4, {})
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
if value:
self._smat[(i, j)] = value
elif (i, j) in self._smat:
del self._smat[(i, j)]
__hash__ = None
def row_del(self, k):
"""Delete the given row of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.row_del(0)
>>> M
Matrix([[0, 1]])
See Also
========
col_del
"""
newD = {}
k = a2idx(k, self.rows)
for (i, j) in self._smat:
if i == k:
pass
elif i > k:
newD[i - 1, j] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.rows -= 1
def col_del(self, k):
"""Delete the given column of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.col_del(0)
>>> M
Matrix([
[0],
[1]])
See Also
========
row_del
"""
newD = {}
k = a2idx(k, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
def row_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.row_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
"""
if i > j:
i, j = j, i
rows = self.row_list()
temp = []
for ii, jj, v in rows:
if ii == i:
self._smat.pop((ii, jj))
temp.append((jj, v))
elif ii == j:
self._smat.pop((ii, jj))
self._smat[i, jj] = v
elif ii > j:
break
for k, v in temp:
self._smat[j, k] = v
def col_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.col_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
"""
if i > j:
i, j = j, i
rows = self.col_list()
temp = []
for ii, jj, v in rows:
if jj == i:
self._smat.pop((ii, jj))
temp.append((ii, v))
elif jj == j:
self._smat.pop((ii, jj))
self._smat[ii, i] = v
elif jj > j:
break
for k, v in temp:
self._smat[k, j] = v
def row_join(self, other):
"""Returns B appended after A (column-wise augmenting)::
[A B]
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
>>> A
Matrix([
[1, 0, 1],
[0, 1, 0],
[1, 1, 0]])
>>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.row_join(B); C
Matrix([
[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
>>> C == A.row_join(Matrix(B))
True
Joining at row ends is the same as appending columns at the end
of the matrix:
>>> C == A.col_insert(A.cols, B)
True
"""
A, B = self, other
if not A.rows == B.rows:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i, j + A.cols)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[(i, j + A.cols)] = v
A.cols += B.cols
return A
def col_join(self, other):
"""Returns B augmented beneath A (row-wise joining)::
[A]
[B]
Examples
========
>>> from sympy import SparseMatrix, Matrix, ones
>>> A = SparseMatrix(ones(3))
>>> A
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> B = SparseMatrix.eye(3)
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.col_join(B); C
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C == A.col_join(Matrix(B))
True
Joining along columns is the same as appending rows at the end
of the matrix:
>>> C == A.row_insert(A.rows, Matrix(B))
True
"""
A, B = self, other
if not A.cols == B.cols:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i + A.rows, j)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[i + A.rows, j] = v
A.rows += B.rows
return A
def copyin_list(self, key, value):
if not is_sequence(value):
raise TypeError("`value` must be of type list or tuple.")
self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
# include this here because it's not part of BaseMatrix
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(
"The Matrix `value` doesn't have the same dimensions "
"as the in sub-Matrix given by `key`.")
if not isinstance(value, SparseMatrix):
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
else:
if (rhi - rlo)*(chi - clo) < len(self):
for i in range(rlo, rhi):
for j in range(clo, chi):
self._smat.pop((i, j), None)
else:
for i, j, v in self.row_list():
if rlo <= i < rhi and clo <= j < chi:
self._smat.pop((i, j), None)
for k, v in value._smat.items():
i, j = k
self[i + rlo, j + clo] = value[i, j]
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
row_op
col_op
"""
self.row_op(i, lambda v, j: f(v, self[k, j]))
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
zip_row_op
col_op
"""
for j in range(self.cols):
v = self._smat.get((i, j), S.Zero)
fv = f(v, j)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i) for i in range(self.rows).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[1, 0] = -1
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[ 2, 4, 0],
[-1, 0, 0],
[ 0, 0, 2]])
"""
for i in range(self.rows):
v = self._smat.get((i, j), S.Zero)
fv = f(v, i)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.zeros(3); M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1); M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not value:
self._smat = {}
else:
v = self._sympify(value)
self._smat = dict([((i, j), v)
for i in range(self.rows) for j in range(self.cols)])
|
hrashk/sympy
|
sympy/matrices/sparse.py
|
Python
|
bsd-3-clause
| 42,913
|
[
"DIRAC"
] |
645d9908b7b9e2cb791022f652fc59e4db64b047451addf52a74c62b08bc7389
|
""" Test the FTS3Utilities"""
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC import S_OK, S_ERROR
__RCSID__ = "$Id $"
import unittest
import mock
import datetime
from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3JSONDecoder, \
FTS3Serializable, \
groupFilesByTarget, \
selectUniqueRandomSource, \
FTS3ServerPolicy
import json
class FakeClass(FTS3Serializable):
""" Just a fake class"""
_attrToSerialize = ['string', 'date', 'dic', 'sub']
def __init__(self):
self.string = ''
self.date = None
self.dic = {}
class TestFTS3Serialization(unittest.TestCase):
""" Test the FTS3 JSON serialization mechanizme with FTS3JSONEncoder,
FTS3JSONDecoder, FTS3Serializable"""
def test_01_basic(self):
""" Basic json transfer"""
obj = FakeClass()
obj.string = 'tata'
obj.date = datetime.datetime.utcnow().replace(microsecond=0)
obj.dic = {'a': 1}
obj.notSerialized = 'Do not'
obj2 = json.loads(obj.toJSON(), cls=FTS3JSONDecoder)
self.assertTrue(obj.string == obj2.string)
self.assertTrue(obj.date == obj2.date)
self.assertTrue(obj.dic == obj2.dic)
self.assertTrue(not hasattr(obj2, 'notSerialized'))
def test_02_subobjects(self):
""" Try setting as attribute an object """
class NonSerializable(object):
""" Fake class not inheriting from FTS3Serializable"""
pass
obj = FakeClass()
obj.sub = NonSerializable()
with self.assertRaises(TypeError):
obj.toJSON()
obj.sub = FakeClass()
obj.sub.string = 'pipo'
obj2 = json.loads(obj.toJSON(), cls=FTS3JSONDecoder)
self.assertTrue(obj.sub.string == obj2.sub.string)
def mock__checkSourceReplicas(ftsFiles):
succ = {}
failed = {}
for ftsFile in ftsFiles:
if hasattr(ftsFile, 'fakeAttr_possibleSources'):
succ[ftsFile.lfn] = dict.fromkeys(getattr(ftsFile, 'fakeAttr_possibleSources'))
else:
failed[ftsFile.lfn] = 'No such file or directory'
return S_OK({'Successful': succ, 'Failed': failed})
class TestFileGrouping(unittest.TestCase):
""" Testing all the grouping functions of FTS3Utilities
"""
def setUp(self):
self.f1 = FTS3File()
self.f1.fakeAttr_possibleSources = ['Src1', 'Src2']
self.f1.lfn = 'f1'
self.f1.targetSE = 'target1'
self.f2 = FTS3File()
self.f2.fakeAttr_possibleSources = ['Src2', 'Src3']
self.f2.lfn = 'f2'
self.f2.targetSE = 'target2'
self.f3 = FTS3File()
self.f3.fakeAttr_possibleSources = ['Src4']
self.f3.lfn = 'f3'
self.f3.targetSE = 'target1'
# File does not exist :-)
self.f4 = FTS3File()
self.f4.lfn = 'f4'
self.f4.targetSE = 'target3'
self.allFiles = [self.f1, self.f2, self.f3, self.f4]
def test_01_groupFilesByTarget(self):
# empty input
self.assertTrue(groupFilesByTarget([])['Value'] == {})
res = groupFilesByTarget(self.allFiles)
self.assertTrue(res['OK'])
groups = res['Value']
self.assertTrue(self.f1 in groups['target1'])
self.assertTrue(self.f2 in groups['target2'])
self.assertTrue(self.f3 in groups['target1'])
self.assertTrue(self.f4 in groups['target3'])
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities._checkSourceReplicas',
side_effect=mock__checkSourceReplicas)
def test_04_selectUniqueRandomSource(self, _mk_checkSourceReplicas):
""" Suppose they all go to the same target """
res = selectUniqueRandomSource(self.allFiles)
self.assertTrue(res['OK'])
uniqueSources = res['Value']
# There should be only f1,f2 and f3
allReturnedFiles = []
existingFiles = [self.f1, self.f2, self.f3]
for srcSe, ftsFiles in uniqueSources.iteritems():
allReturnedFiles.extend(ftsFiles)
# No files should be duplicated and all files should be there, except the non existing one
self.assertEqual(len(existingFiles), len(allReturnedFiles))
self.assertEqual(set(existingFiles), set(allReturnedFiles))
filesInSrc1 = uniqueSources.get('Src1', [])
filesInSrc2 = uniqueSources.get('Src2', [])
filesInSrc3 = uniqueSources.get('Src3', [])
filesInSrc4 = uniqueSources.get('Src4', [])
# f1
self.assertTrue(self.f1 in filesInSrc1 + filesInSrc2)
self.assertTrue(self.f2 in filesInSrc2 + filesInSrc3)
self.assertTrue(self.f3 in filesInSrc4)
def mock__failoverServerPolicy(_attempt):
return "server_0"
def mock__randomServerPolicy(_attempt):
return "server_0"
def mock__sequenceServerPolicy(_attempt):
return "server_0"
def mock__OKFTSServerStatus(ftsServer):
return S_OK(ftsServer)
def mock__ErrorFTSServerStatus(ftsServer):
return S_ERROR(ftsServer)
class TestFTS3ServerPolicy (unittest.TestCase):
""" Testing FTS3 ServerPolicy selection """
def setUp(self):
self.fakeServerDict = {"server_0": "server0.cern.ch",
"server_1": "server1.cern.ch",
"server_2": "server2.cern.ch"}
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus',
side_effect=mock__OKFTSServerStatus)
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._sequenceServerPolicy',
side_effect=mock__sequenceServerPolicy)
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._randomServerPolicy',
side_effect=mock__randomServerPolicy)
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._failoverServerPolicy',
side_effect=mock__failoverServerPolicy)
def testCorrectServerPolicyIsUsed(
self,
mockFailoverFunc,
mockRandomFunc,
mockSequenceFunc,
mockFTSServerStatus):
" Test correct server policy method is called "
obj = FTS3ServerPolicy(self.fakeServerDict, "Sequence")
obj.chooseFTS3Server()
self.assertTrue(mockSequenceFunc.called)
obj = FTS3ServerPolicy(self.fakeServerDict, "Random")
obj.chooseFTS3Server()
self.assertTrue(mockRandomFunc.called)
obj = FTS3ServerPolicy(self.fakeServerDict, "Failover")
obj.chooseFTS3Server()
self.assertTrue(mockFailoverFunc.called)
# random policy should be selected for an invalid policy
obj = FTS3ServerPolicy(self.fakeServerDict, "InvalidPolicy")
obj.chooseFTS3Server()
self.assertTrue(mockRandomFunc.called)
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus',
side_effect=mock__ErrorFTSServerStatus)
def testFailoverServerPolicy(self, mockFTSServerStatus):
""" Test if the failover server policy returns server at a given position"""
obj = FTS3ServerPolicy(self.fakeServerDict, "Failover")
for i in range(len(self.fakeServerDict)):
self.assertEquals('server_%d' % i, obj._failoverServerPolicy(i))
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus',
side_effect=mock__ErrorFTSServerStatus)
def testSequenceServerPolicy(self, mockFTSServerStatus):
""" Test if the sequence server policy selects the servers Sequentially """
obj = FTS3ServerPolicy(self.fakeServerDict, "Sequence")
for i in range(len(self.fakeServerDict)):
self.assertEquals('server_%d' % i, obj._sequenceServerPolicy(i))
self.assertEquals('server_0', obj._sequenceServerPolicy(i))
@mock.patch(
'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus',
side_effect=mock__ErrorFTSServerStatus)
def testRandomServerPolicy(self, mockFTSServerStatus):
""" Test if the random server policy does not selects the same server multiple times """
obj = FTS3ServerPolicy(self.fakeServerDict, "Random")
serverSet = set()
for i in range(len(self.fakeServerDict)):
serverSet.add(obj._randomServerPolicy(i))
self.assertEquals(len(serverSet), len(self.fakeServerDict))
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestFTS3Serialization)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFileGrouping))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFTS3ServerPolicy))
unittest.TextTestRunner(verbosity=2).run(suite)
|
fstagni/DIRAC
|
DataManagementSystem/private/test/Test_FTS3Utilities.py
|
Python
|
gpl-3.0
| 8,312
|
[
"DIRAC"
] |
b07ca6d807b395b5348437cc72dc6dcd92f6a1832f78b721d14c4128873513e3
|
import numpy as np
import scipy
import time as ttime
import logging
logger = logging.getLogger(__name__)
# =================================================================================
# The following set of functions are separated from the rest of the program
# and prepared to be moved to scikit-beam (skbeam.core.fitting.xrf_model)
def grid_interpolate(data, xx, yy, xx_uniform=None, yy_uniform=None):
"""
Interpolate unevenly sampled data to even grid. The new even grid has the same
dimensions as the original data and covers full range of original X and Y axes.
Parameters
----------
data : ndarray
2D array with data values (`xx`, `yy` and `data` must have the same shape)
``data`` may be None. In this case interpolation will not be performed, but uniform
grid will be generated. Use this feature to generate uniform grid.
xx : ndarray
2D array with measured values of X coordinates of data points (the values may be unevenly spaced)
yy : ndarray
2D array with measured values of Y coordinates of data points (the values may be unevenly spaced)
xx_uniform : ndarray
2D array with evenly spaced X axis values (same shape as `data`). If not provided, then
generated automatically and returned by the function.
yy_uniform : ndarray
2D array with evenly spaced Y axis values (same shape as `data`). If not provided, then
generated automatically and returned by the function.
Returns
-------
data_uniform : ndarray
2D array with data fitted to even grid (same shape as `data`)
xx_uniform : ndarray
2D array with evenly spaced X axis values (same shape as `data`)
yy_uniform : ndarray
2D array with evenly spaced Y axis values (same shape as `data`)
"""
# Check if data shape and shape of coordinate arrays match
if data is not None:
if data.shape != xx.shape:
msg = "Shapes of data and coordinate arrays do not match. (function 'grid_interpolate')"
raise ValueError(msg)
if xx.shape != yy.shape:
msg = "Shapes of coordinate arrays 'xx' and 'yy' do not match. (function 'grid_interpolate')"
raise ValueError(msg)
if (xx_uniform is not None) and (xx_uniform.shape != xx.shape):
msg = (
"Shapes of data and array of uniform coordinates 'xx_uniform' do not match. "
"(function 'grid_interpolate')"
)
raise ValueError(msg)
if (yy_uniform is not None) and (yy_uniform.shape != yy.shape):
msg = (
"Shapes of data and array of uniform coordinates 'yy_uniform' do not match. "
"(function 'grid_interpolate')"
)
raise ValueError(msg)
ny, nx = xx.shape
# Data must be 2-dimensional to use the following interpolation procedure.
if (nx <= 1) or (ny <= 1):
logger.debug("Function utils.grid_interpolate: single row or column scan. Grid interpolation is skipped")
return data, xx, yy
def _get_range(vv):
"""
Returns the range of the data coordinates along X or Y axis. Coordinate
data for a single axis is represented as a 2D array ``vv``. The array
will have all rows or all columns identical or almost identical.
The range is returned as ``vv_min`` (leftmost or topmost value)
and ``vv_max`` (rightmost or bottommost value). Note, that ``vv_min`` may
be greater than ``vv_max``
Parameters
----------
vv : ndarray
2-d array of coordinates
Returns
-------
vv_min : float
starting point of the range
vv_max : float
end of the range
"""
# The assumption is that X values are mostly changing along the dimension 1 and
# Y values change along the dimension 0 of the 2D array and only slightly change
# along the alternative dimension. Determine, if the range is for X or Y
# axis based on the dimension in which value change is the largest.
if abs(vv[0, 0] - vv[0, -1]) > abs(vv[0, 0] - vv[-1, 0]):
vv_min = np.median(vv[:, 0])
vv_max = np.median(vv[:, -1])
else:
vv_min = np.median(vv[0, :])
vv_max = np.median(vv[-1, :])
return vv_min, vv_max
if xx_uniform is None or yy_uniform is None:
# Find the range of axes
x_min, x_max = _get_range(xx)
y_min, y_max = _get_range(yy)
_yy_uniform, _xx_uniform = np.mgrid[y_min : y_max : ny * 1j, x_min : x_max : nx * 1j]
if xx_uniform is None:
xx_uniform = _xx_uniform
if yy_uniform is None:
yy_uniform = _yy_uniform
xx = xx.flatten()
yy = yy.flatten()
xxyy = np.stack((xx, yy)).T
if data is not None:
# Do the interpolation only if data is provided
data = data.flatten()
# Do the interpolation
data_uniform = scipy.interpolate.griddata(
xxyy, data, (xx_uniform, yy_uniform), method="linear", fill_value=0
)
else:
data_uniform = None
return data_uniform, xx_uniform, yy_uniform
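# Illustrative usage sketch (not part of the original module): interpolating a small,
# slightly uneven measurement grid onto an even grid with ``grid_interpolate``. The
# sample values below are hypothetical and only demonstrate the call signature.
def _demo_grid_interpolate():
    ny, nx = 3, 4
    yy, xx = np.mgrid[0:1:ny * 1j, 0:1:nx * 1j]
    xx = xx + 0.02 * np.random.rand(ny, nx)  # simulate slightly uneven sampling along X
    data = np.sin(xx) + np.cos(yy)
    data_u, xx_u, yy_u = grid_interpolate(data, xx, yy)
    # 'data_u' has the same shape as 'data', but is sampled on the even grid (xx_u, yy_u)
    return data_u, xx_u, yy_u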
def normalize_data_by_scaler(data_in, scaler, *, data_name=None, name_not_scalable=None):
"""
Normalize data based on the availability of scaler
Parameters
----------
data_in : ndarray
numpy array of input data
scaler : ndarray
numpy array of scaling data, the same size as data_in
data_name : str
name of the data set ('time' or 'i0' etc.)
name_not_scalable : list
names of not scalable datasets (['time', 'i0_time'])
Returns
-------
ndarray with normalized data, the same shape as data_in
The returned array is the reference to 'data_in' if no normalization
is applied to data or reference to modified copy of 'data_in' if
normalization was applied.
::note::
Normalization will not be performed if the following is true:
- scaler is None
- scaler is not the same shape as data_in
- scaler contains all elements equal to zero
If normalization is not performed then REFERENCE to data_in is returned.
"""
if data_in is None or scaler is None: # Nothing to scale
logger.debug(
"Function utils.normalize_data_by_scaler: data and/or scaler arrays are None. "
"Data scaling is skipped."
)
return data_in
if data_in.shape != scaler.shape:
logger.debug(
"Function utils.normalize_data_by_scaler: data and scaler arrays have different shape. "
"Data scaling is skipped."
)
return data_in
do_scaling = False
# Check if data name is in the list of non-scalable items
# If data name or the list does not exist, then do the scaling
if name_not_scalable is None or data_name is None or data_name not in name_not_scalable:
do_scaling = True
# If scaler is all zeros, then don't scale the data:
# check if there is at least one nonzero element
n_nonzero = np.count_nonzero(scaler)
if not n_nonzero:
logger.debug(
"Function utils.normalize_data_by_scaler: scaler is all-zeros array. Data scaling is skipped."
)
do_scaling = False
if do_scaling:
# If scaler contains some zeros, set those zeros to mean value
if data_in.size != n_nonzero:
s_mean = np.mean(scaler[scaler != 0])
# Avoid division by very small number (or zero)
if np.abs(s_mean) < 1e-10:
s_mean = 1e-10 if np.sign(s_mean) >= 0 else -1e-10
scaler = scaler.copy()
scaler[scaler == 0.0] = s_mean
data_out = data_in / scaler
else:
data_out = data_in
return data_out
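# Illustrative usage sketch (not part of the original module): normalizing a hypothetical
# map by a scaler that contains a zero; the zero is replaced by the scaler mean before
# the element-wise division, as described in the docstring above.
def _demo_normalize_data_by_scaler():
    data = np.array([[2.0, 4.0], [6.0, 8.0]])
    scaler = np.array([[1.0, 2.0], [0.0, 4.0]])  # the zero entry is patched internally
    return normalize_data_by_scaler(data, scaler, data_name="counts",
                                    name_not_scalable=["time", "i0_time"])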
# ===============================================================================
# The following functions are prepared to be moved to scikit-beam
def _get_2_sqrt_2_log2():
return 2 * np.sqrt(2 * np.log(2))
def gaussian_sigma_to_fwhm(sigma):
"""
Converts parameters of Gaussian curve: 'sigma' to 'fwhm'
Parameters
----------
sigma : float
sigma of the Gaussian curve
Returns
-------
FWHM of the Gaussian curve
"""
return sigma * _get_2_sqrt_2_log2()
def gaussian_fwhm_to_sigma(fwhm):
"""
Converts parameters of Gaussian curve: 'fwhm' to 'sigma'
Parameters
----------
fwhm : float
Full Width at Half Maximum of the Gaussian curve
Returns
-------
sigma of the Gaussian curve
"""
return fwhm / _get_2_sqrt_2_log2()
def _get_sqrt_2_pi():
return np.sqrt(2 * np.pi)
def gaussian_max_to_area(peak_max, peak_sigma):
"""
Computes the area under Gaussian curve based on maximum and sigma
Parameters
----------
peak_max : float
maximum of the Gaussian curve
peak_sigma : float
sigma of the Gaussian curve
Returns
-------
area under the Gaussian curve
"""
return peak_max * peak_sigma * _get_sqrt_2_pi()
def gaussian_area_to_max(peak_area, peak_sigma):
"""
Computes the maximum of the Gaussian curve based on area
under the curve and sigma
Parameters
----------
peak_area : float
area under the Gaussian curve
peak_sigma : float
sigma of the Gaussian curve
Returns
-------
maximum of the Gaussian curve
"""
if peak_sigma == 0:
return 0
else:
return peak_area / peak_sigma / _get_sqrt_2_pi()
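# Illustrative round-trip check (not part of the original module) for the Gaussian
# parameter conversions defined above: sigma <-> FWHM and peak maximum <-> area.
def _demo_gaussian_conversions():
    sigma = 1.5
    fwhm = gaussian_sigma_to_fwhm(sigma)          # 2*sqrt(2*ln(2))*sigma
    assert np.isclose(gaussian_fwhm_to_sigma(fwhm), sigma)
    peak_max = 10.0
    area = gaussian_max_to_area(peak_max, sigma)  # peak_max*sigma*sqrt(2*pi)
    assert np.isclose(gaussian_area_to_max(area, sigma), peak_max)
    return fwhm, area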
# ==================================================================================
def convert_time_to_nexus_string(t):
"""
Convert time to a string according to NEXUS format
Parameters
----------
t : time.struct_time
Time in the format returned by ``time.localtime`` or ``time.gmtime``
Returns
-------
t : str
A string representation of time according to the NEXUS standard
"""
# Convert to the string format recommended for NEXUS files
t = ttime.strftime("%Y-%m-%dT%H:%M:%S+00:00", t)
return t
def convert_time_from_nexus_string(t):
"""
Convert time from NEXUS string to ``time.struct_time``
Parameters
----------
t : str
A string representation of time according to the NEXUS standard
Returns
-------
t : time.struct_time
Time in the format returned by ``time.localtime`` or ``time.gmtime``
"""
# Parse the string format recommended for NEXUS files
t = ttime.strptime(t, "%Y-%m-%dT%H:%M:%S+00:00")
return t
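# Illustrative round trip (not part of the original module): format the current UTC time
# as a NEXUS string and parse it back with the two helpers above.
def _demo_nexus_time_round_trip():
    t = ttime.gmtime()
    s = convert_time_to_nexus_string(t)
    t2 = convert_time_from_nexus_string(s)
    return s, t2[:6] == t[:6]  # date/time fields survive the round trip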
|
NSLS-II-HXN/PyXRF
|
pyxrf/core/utils.py
|
Python
|
bsd-3-clause
| 10,665
|
[
"Gaussian"
] |
df27f7dcd2cba3443c4e7896291944db313aebd39160ae189aa93cca1877a994
|
# mako/ast.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, util
import re
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared lists.
# using AST to parse instead of using code.co_varnames,
# code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if
# it's declared later in the same block of code
# - AST is less likely to break with version changes
# (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, basestring):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr)
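# Illustrative usage sketch (not part of the original module): PythonCode analyzes a code
# string and reports which identifiers are assigned (declared) and which are referenced
# before assignment (undeclared). The snippet passed in below is hypothetical.
def _demo_python_code_identifiers():
    info = PythonCode("x = y + 1")
    # info.declared_identifiers   -> set(['x'])
    # info.undeclared_identifiers -> set(['y'])
    return info.declared_identifiers, info.undeclared_identifiers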
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = set()
self.undeclared_identifiers = set()
if isinstance(code, basestring):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if there's text and no trailing comma, ensure it's parsed
# as a tuple by adding a trailing comma
code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr)
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control statements
e.g.
for x in 5:
elif y==9:
except (MyException, e):
etc.
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
if not m:
raise exceptions.CompileException(
"Fragment '%s' is not a partial control statement" %
code, **exception_kwargs)
if m.group(3):
code = code[:m.start(3)]
(keyword, expr) = m.group(1,2)
if keyword in ['for','if', 'while']:
code = code + "pass"
elif keyword == 'try':
code = code + "pass\nexcept:pass"
elif keyword == 'elif' or keyword == 'else':
code = "if False:pass\n" + code + "pass"
elif keyword == 'except':
code = "try:pass\n" + code + "pass"
else:
raise exceptions.CompileException(
"Unsupported control keyword: '%s'" %
keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr)
if not hasattr(self, 'funcname'):
raise exceptions.CompileException(
"Code '%s' is not a function declaration" % code,
**exception_kwargs)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException(
"'**%s' keyword argument not allowed here" %
self.argnames[-1], **exception_kwargs)
def get_argument_expressions(self, include_defaults=True):
"""return the argument declarations of this FunctionDecl as a printable list."""
namedecls = []
defaults = [d for d in self.defaults]
kwargs = self.kwargs
varargs = self.varargs
argnames = [f for f in self.argnames]
argnames.reverse()
for arg in argnames:
default = None
if kwargs:
arg = "**" + arg
kwargs = False
elif varargs:
arg = "*" + arg
varargs = False
else:
default = len(defaults) and defaults.pop() or None
if include_defaults and default:
namedecls.insert(0, "%s=%s" %
(arg,
pyparser.ExpressionGenerator(default).value()
)
)
else:
namedecls.insert(0, arg)
return namedecls
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs)
|
theguardian/LazyLibrarian_Old
|
mako/ast.py
|
Python
|
gpl-3.0
| 5,565
|
[
"VisIt"
] |
de3944168a14d2b22556fd3fa947b94f3b7c0ff8034f0505ee77f2f6d40bde66
|
""" Meager code path measurement tool.
Ned Batchelder
http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html
MIT License.
"""
from __future__ import with_statement
import optparse
import sys
import tokenize
from collections import defaultdict
try:
import ast
from ast import iter_child_nodes
except ImportError: # Python 2.5
from flake8.util import ast, iter_child_nodes
__version__ = '0.6.1'
class ASTVisitor(object):
"""Performs a depth-first walk of the AST."""
def __init__(self):
self.node = None
self._cache = {}
def default(self, node, *args):
for child in iter_child_nodes(node):
self.dispatch(child, *args)
def dispatch(self, node, *args):
self.node = node
klass = node.__class__
meth = self._cache.get(klass)
if meth is None:
className = klass.__name__
meth = getattr(self.visitor, 'visit' + className, self.default)
self._cache[klass] = meth
return meth(node, *args)
def preorder(self, tree, visitor, *args):
"""Do preorder walk of tree using visitor"""
self.visitor = visitor
visitor.visit = self.dispatch
self.dispatch(tree, *args) # XXX *args make sense?
class PathNode(object):
def __init__(self, name, look="circle"):
self.name = name
self.look = look
def to_dot(self):
print('node [shape=%s,label="%s"] %d;' % (
self.look, self.name, self.dot_id()))
def dot_id(self):
return id(self)
class PathGraph(object):
def __init__(self, name, entity, lineno, column=0):
self.name = name
self.entity = entity
self.lineno = lineno
self.column = column
self.nodes = defaultdict(list)
def connect(self, n1, n2):
self.nodes[n1].append(n2)
# Ensure that the destination node is always counted.
self.nodes[n2] = []
def to_dot(self):
print('subgraph {')
for node in self.nodes:
node.to_dot()
for node, nexts in self.nodes.items():
for next in nexts:
print('%s -- %s;' % (node.dot_id(), next.dot_id()))
print('}')
def complexity(self):
""" Return the McCabe complexity for the graph.
E - V + 2, where E is the number of edges and V the number of nodes.
"""
num_edges = sum([len(n) for n in self.nodes.values()])
num_nodes = len(self.nodes)
return num_edges - num_nodes + 2
class PathGraphingAstVisitor(ASTVisitor):
""" A visitor for a parsed Abstract Syntax Tree which finds executable
statements.
"""
def __init__(self):
super(PathGraphingAstVisitor, self).__init__()
self.classname = ""
self.graphs = {}
self.reset()
def reset(self):
self.graph = None
self.tail = None
def dispatch_list(self, node_list):
for node in node_list:
self.dispatch(node)
def visitFunctionDef(self, node):
if self.classname:
entity = '%s%s' % (self.classname, node.name)
else:
entity = node.name
name = '%d:%d: %r' % (node.lineno, node.col_offset, entity)
if self.graph is not None:
# closure
pathnode = self.appendPathNode(name)
self.tail = pathnode
self.dispatch_list(node.body)
bottom = PathNode("", look='point')
self.graph.connect(self.tail, bottom)
self.graph.connect(pathnode, bottom)
self.tail = bottom
else:
self.graph = PathGraph(name, entity, node.lineno, node.col_offset)
pathnode = PathNode(name)
self.tail = pathnode
self.dispatch_list(node.body)
self.graphs["%s%s" % (self.classname, node.name)] = self.graph
self.reset()
visitAsyncFunctionDef = visitFunctionDef
def visitClassDef(self, node):
old_classname = self.classname
self.classname += node.name + "."
self.dispatch_list(node.body)
self.classname = old_classname
def appendPathNode(self, name):
if not self.tail:
return
pathnode = PathNode(name)
self.graph.connect(self.tail, pathnode)
self.tail = pathnode
return pathnode
def visitSimpleStatement(self, node):
if node.lineno is None:
lineno = 0
else:
lineno = node.lineno
name = "Stmt %d" % lineno
self.appendPathNode(name)
def default(self, node, *args):
if isinstance(node, ast.stmt):
self.visitSimpleStatement(node)
else:
super(PathGraphingAstVisitor, self).default(node, *args)
def visitLoop(self, node):
name = "Loop %d" % node.lineno
self._subgraph(node, name)
visitAsyncFor = visitFor = visitWhile = visitLoop
def visitIf(self, node):
name = "If %d" % node.lineno
self._subgraph(node, name)
def _subgraph(self, node, name, extra_blocks=()):
"""create the subgraphs representing any `if` and `for` statements"""
if self.graph is None:
# global loop
self.graph = PathGraph(name, name, node.lineno, node.col_offset)
pathnode = PathNode(name)
self._subgraph_parse(node, pathnode, extra_blocks)
self.graphs["%s%s" % (self.classname, name)] = self.graph
self.reset()
else:
pathnode = self.appendPathNode(name)
self._subgraph_parse(node, pathnode, extra_blocks)
def _subgraph_parse(self, node, pathnode, extra_blocks):
"""parse the body and any `else` block of `if` and `for` statements"""
loose_ends = []
self.tail = pathnode
self.dispatch_list(node.body)
loose_ends.append(self.tail)
for extra in extra_blocks:
self.tail = pathnode
self.dispatch_list(extra.body)
loose_ends.append(self.tail)
if node.orelse:
self.tail = pathnode
self.dispatch_list(node.orelse)
loose_ends.append(self.tail)
else:
loose_ends.append(pathnode)
if pathnode:
bottom = PathNode("", look='point')
for le in loose_ends:
self.graph.connect(le, bottom)
self.tail = bottom
def visitTryExcept(self, node):
name = "TryExcept %d" % node.lineno
self._subgraph(node, name, extra_blocks=node.handlers)
visitTry = visitTryExcept
def visitWith(self, node):
name = "With %d" % node.lineno
self.appendPathNode(name)
self.dispatch_list(node.body)
visitAsyncWith = visitWith
class McCabeChecker(object):
"""McCabe cyclomatic complexity checker."""
name = 'mccabe'
version = __version__
_code = 'C901'
_error_tmpl = "C901 %r is too complex (%d)"
max_complexity = -1
def __init__(self, tree, filename):
self.tree = tree
@classmethod
def add_options(cls, parser):
flag = '--max-complexity'
kwargs = {
'default': -1,
'action': 'store',
'type': 'int',
'help': 'McCabe complexity threshold',
'parse_from_config': 'True',
}
config_opts = getattr(parser, 'config_options', None)
if isinstance(config_opts, list):
# Flake8 2.x
kwargs.pop('parse_from_config')
parser.add_option(flag, **kwargs)
parser.config_options.append('max-complexity')
else:
parser.add_option(flag, **kwargs)
@classmethod
def parse_options(cls, options):
cls.max_complexity = int(options.max_complexity)
def run(self):
if self.max_complexity < 0:
return
visitor = PathGraphingAstVisitor()
visitor.preorder(self.tree, visitor)
for graph in visitor.graphs.values():
if graph.complexity() > self.max_complexity:
text = self._error_tmpl % (graph.entity, graph.complexity())
yield graph.lineno, graph.column, text, type(self)
def get_code_complexity(code, threshold=7, filename='stdin'):
try:
tree = compile(code, filename, "exec", ast.PyCF_ONLY_AST)
except SyntaxError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to parse %s: %s\n" % (filename, e))
return 0
complx = []
McCabeChecker.max_complexity = threshold
for lineno, offset, text, check in McCabeChecker(tree, filename).run():
complx.append('%s:%d:1: %s' % (filename, lineno, text))
if len(complx) == 0:
return 0
print('\n'.join(complx))
return len(complx)
def get_module_complexity(module_path, threshold=7):
"""Returns the complexity of a module"""
with open(module_path, "rU") as mod:
code = mod.read()
return get_code_complexity(code, threshold, filename=module_path)
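# Illustrative usage sketch (not part of the original module): measuring a small,
# hypothetical code string with ``get_code_complexity``. Functions whose McCabe
# complexity exceeds the threshold are reported on stdout and counted.
def _demo_get_code_complexity():
    snippet = (
        "def branchy(a):\n"
        "    if a > 0:\n"
        "        return 1\n"
        "    elif a < 0:\n"
        "        return -1\n"
        "    else:\n"
        "        return 0\n"
    )
    # With threshold=1, 'branchy' (two decision points, complexity 3) is flagged.
    return get_code_complexity(snippet, threshold=1, filename='demo')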
def _read(filename):
if (2, 5) < sys.version_info < (3, 0):
with open(filename, 'rU') as f:
return f.read()
elif (3, 0) <= sys.version_info < (4, 0):
"""Read the source code."""
try:
with open(filename, 'rb') as f:
(encoding, _) = tokenize.detect_encoding(f.readline)
except (LookupError, SyntaxError, UnicodeError):
# Fall back if file encoding is improperly declared
with open(filename, encoding='latin-1') as f:
return f.read()
with open(filename, 'r', encoding=encoding) as f:
return f.read()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
opar = optparse.OptionParser()
opar.add_option("-d", "--dot", dest="dot",
help="output a graphviz dot file", action="store_true")
opar.add_option("-m", "--min", dest="threshold",
help="minimum complexity for output", type="int",
default=1)
options, args = opar.parse_args(argv)
code = _read(args[0])
tree = compile(code, args[0], "exec", ast.PyCF_ONLY_AST)
visitor = PathGraphingAstVisitor()
visitor.preorder(tree, visitor)
if options.dot:
print('graph {')
for graph in visitor.graphs.values():
if (not options.threshold or
graph.complexity() >= options.threshold):
graph.to_dot()
print('}')
else:
for graph in visitor.graphs.values():
if graph.complexity() >= options.threshold:
print(graph.name, graph.complexity())
if __name__ == '__main__':
main(sys.argv[1:])
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/mccabe.py
|
Python
|
apache-2.0
| 10,693
|
[
"VisIt"
] |
5cf332c1d42c846ff99d28dc91bf8eccdaa7090b9742fcb71530a5b292b01900
|
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Result utility module.
When we run Amber, we write certain files to indicate the result.
This module contains functions for getting the paths of these files.
"""
from pathlib import Path
from typing import Optional
from gfauto import util
def write_status(
result_output_dir: Path, status: str, bad_shader_name: Optional[str] = None
) -> None:
util.file_write_text(get_status_path(result_output_dir), status)
if bad_shader_name:
util.file_write_text(
get_status_bad_shader_name_path(result_output_dir), bad_shader_name
)
def get_status_path(result_output_dir: Path) -> Path:
return result_output_dir / "STATUS"
def get_status_bad_shader_name(result_output_dir: Path) -> str:
bad_shader_name_path = get_status_bad_shader_name_path(result_output_dir)
return util.file_read_text_or_else(bad_shader_name_path, "")
def get_status_bad_shader_name_path(result_output_dir: Path) -> Path:
return result_output_dir / "BAD_SHADER"
def get_status(result_output_dir: Path) -> str:
status_file = get_status_path(result_output_dir)
return util.file_read_text_or_else(status_file, "UNEXPECTED_ERROR")
def get_log_path(result_output_dir: Path) -> Path:
return result_output_dir / "log.txt"
def get_amber_log_path(result_dir: Path) -> Path:
return result_dir / "amber_log.txt"
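# Illustrative usage sketch (not part of the original module): writing a result status for
# a (hypothetical) result directory and reading it back with the helpers above. The
# directory handling is left to gfauto.util, as in the rest of this module.
def _demo_status_round_trip(result_output_dir: Path) -> str:
    write_status(result_output_dir, "CRASH", bad_shader_name="variant_001")
    assert get_status_bad_shader_name(result_output_dir) == "variant_001"
    return get_status(result_output_dir)  # -> "CRASH"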
|
google/graphicsfuzz
|
gfauto/gfauto/result_util.py
|
Python
|
apache-2.0
| 1,974
|
[
"Amber"
] |
e74672171e592a0616760637b4b8770cfe1d8ff73bbf3a58d1d7b99503ec50ed
|
"""
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.2, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.2, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
|
chenyyx/scikit-learn-doc-zh
|
examples/zh/gaussian_process/plot_gpr_prior_posterior.py
|
Python
|
gpl-3.0
| 2,900
|
[
"Gaussian"
] |
1d3dd2ca5ab1739069c0308e36246dabe29b5450e177854b808e75939e09c4ec
|
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier ([email protected])
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from pysces.version import __version__
__doc__ = '''
PyscesConfig
------------
This module contains templates for the default configuration files
on POSIX and WIN32 systems as well as utility functions for reading
and writing them.
'''
import ConfigParser
import string
import os
if os.sys.platform == 'win32':
__DefaultWin = {
"install_dir" : os.path.join(os.sys.prefix,'lib','site-packages','pysces'),
"model_dir" : "os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','psc')",
"output_dir" : "os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces')",
"gnuplot_dir" : "None",
"pitcon" : True,
"nleq2" : True,
"gnuplot" : False,
"matplotlib" : True,
"matplotlib_backend" : 'TKagg',
"silentstart" : False
}
__DefaultWinUsr = {
"model_dir" : os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','psc'),
"output_dir" : os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces'),
"silentstart" : False
}
else:
if hasattr(os.sys, 'lib'):
lib = os.sys.lib
else:
lib = 'lib'
__DefaultPosix = {
"install_dir" : os.path.join(os.sys.prefix,lib,"python%d.%d" % tuple(os.sys.version_info[:2]) ,'site-packages','pysces'),
"model_dir" : "os.path.join(os.path.expanduser('~'),'Pysces','psc')",
"output_dir" : "os.path.join(os.path.expanduser('~'),'Pysces')",
"gnuplot_dir" : "None",
"pitcon" : True,
"nleq2" : True,
"gnuplot" : False,
"matplotlib" : True,
"matplotlib_backend" : 'TKagg',
"silentstart" : False
}
__DefaultPosixUsr = {
"model_dir" : os.path.join(os.path.expanduser('~'),'Pysces','psc'),
"output_dir" : os.path.join(os.path.expanduser('~'),'Pysces'),
"silentstart" : False
}
def ReadConfig(file_path, config={}):
"""
Read a PySCeS configuration file
- *file_path* full path to file
- *config [default={}]* configuration data
"""
filein = open(file_path,'r')
cp = ConfigParser.ConfigParser()
cp.readfp(filein)
for sec in cp.sections():
name = string.lower(sec)
for opt in cp.options(sec):
config[string.lower(opt)] = string.strip(cp.get(sec, opt))
filein.close()
return config
def WriteConfig(file_path, config={}, section='Pysces'):
"""
Write a PySCeS configuration file
- *file_path* full path to file
- *config* [default={}]: config dictionary
- *section* [default='Pysces']: default man section name
"""
cfgfile = open(file_path,'w')
cp = ConfigParser.ConfigParser()
cp.add_section(section)
for key in config:
cp.set(section,key, config[key])
cp.write(cfgfile)
cfgfile.close()
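# Illustrative usage sketch (not part of the original module): writing a small,
# hypothetical configuration dictionary to disk and reading it back. Note that
# ReadConfig returns every value as a string.
def _demo_config_round_trip(file_path):
    WriteConfig(file_path, config={'output_dir': '/tmp/Pysces', 'silentstart': False})
    return ReadConfig(file_path)  # e.g. {'output_dir': '/tmp/Pysces', 'silentstart': 'False'}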
|
asttra/pysces
|
pysces/PyscesConfig.py
|
Python
|
bsd-3-clause
| 3,624
|
[
"PySCeS"
] |
39b8eb131108d75b68dddbd65996d42f289968773aae6fd55bddacdfc02d4842
|
#
# @file TestAnnotationCopyAndClone.py
# @brief Test the copy and clone methods for annotation classes
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestCopyAndClone.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestAnnotationCopyAndClone(unittest.TestCase):
def test_CVTerm_assignmentOperator(self):
CVTerm1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
CVTerm1.addResource("http://www.geneontology.org/#GO:0005892")
self.assert_( CVTerm1.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( CVTerm1.getResources().getLength() == 1 )
self.assert_( CVTerm1.getResources().getValue(0) == "http://www.geneontology.org/#GO:0005892" )
CVTerm2 = libsbml.CVTerm()
CVTerm2 = CVTerm1
self.assert_( CVTerm2.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( CVTerm2.getResources().getLength() == 1 )
self.assert_( CVTerm2.getResources().getValue(0) == "http://www.geneontology.org/#GO:0005892" )
CVTerm2 = None
CVTerm1 = None
pass
def test_CVTerm_clone(self):
CVTerm1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
CVTerm1.addResource("http://www.geneontology.org/#GO:0005892")
self.assert_( CVTerm1.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( CVTerm1.getResources().getLength() == 1 )
self.assert_( CVTerm1.getResources().getValue(0) == "http://www.geneontology.org/#GO:0005892" )
CVTerm2 = CVTerm1.clone()
self.assert_( CVTerm2.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( CVTerm2.getResources().getLength() == 1 )
self.assert_( CVTerm2.getResources().getValue(0) == "http://www.geneontology.org/#GO:0005892" )
CVTerm2 = None
CVTerm1 = None
pass
def test_CVTerm_copyConstructor(self):
CVTerm1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
CVTerm1.addResource("http://www.geneontology.org/#GO:0005892")
self.assert_( CVTerm1.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( CVTerm1.getResources().getLength() == 1 )
self.assert_( CVTerm1.getResources().getValue(0) == "http://www.geneontology.org/#GO:0005892" )
CVTerm2 = libsbml.CVTerm(CVTerm1)
self.assert_( CVTerm2.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assert_( CVTerm2.getResources().getLength() == 1 )
self.assert_( CVTerm2.getResources().getValue(0) == "http://www.geneontology.org/#GO:0005892" )
CVTerm2 = None
CVTerm1 = None
pass
def test_Date_assignmentOperator(self):
date = libsbml.Date(2005,12,30,12,15,45,1,2,0)
self.assert_( date.getMonth() == 12 )
self.assert_( date.getSecond() == 45 )
date2 = libsbml.Date()
date2 = date
self.assert_( date2.getMonth() == 12 )
self.assert_( date2.getSecond() == 45 )
date2 = None
date = None
pass
def test_Date_clone(self):
date = libsbml.Date(2005,12,30,12,15,45,1,2,0)
self.assert_( date.getMonth() == 12 )
self.assert_( date.getSecond() == 45 )
date2 = date.clone()
self.assert_( date2.getMonth() == 12 )
self.assert_( date2.getSecond() == 45 )
date2 = None
date = None
pass
def test_Date_copyConstructor(self):
date = libsbml.Date(2005,12,30,12,15,45,1,2,0)
self.assert_( date.getMonth() == 12 )
self.assert_( date.getSecond() == 45 )
date2 = libsbml.Date(date)
self.assert_( date2.getMonth() == 12 )
self.assert_( date2.getSecond() == 45 )
date2 = None
date = None
pass
def test_ModelCreator_assignmentOperator(self):
mc = libsbml.ModelCreator()
mc.setFamilyName("Keating")
mc.setEmail("[email protected]")
self.assert_( mc.getFamilyName() == "Keating" )
self.assert_( mc.getEmail() == "[email protected]" )
mc2 = libsbml.ModelCreator()
mc2 = mc
self.assert_( mc2.getFamilyName() == "Keating" )
self.assert_( mc2.getEmail() == "[email protected]" )
mc2 = None
mc = None
pass
def test_ModelCreator_clone(self):
mc = libsbml.ModelCreator()
mc.setFamilyName("Keating")
mc.setEmail("[email protected]")
self.assert_( mc.getFamilyName() == "Keating" )
self.assert_( mc.getEmail() == "[email protected]" )
mc2 = mc.clone()
self.assert_( mc2.getFamilyName() == "Keating" )
self.assert_( mc2.getEmail() == "[email protected]" )
mc2 = None
mc = None
pass
def test_ModelCreator_copyConstructor(self):
mc = libsbml.ModelCreator()
mc.setFamilyName("Keating")
mc.setEmail("[email protected]")
self.assert_( mc.getFamilyName() == "Keating" )
self.assert_( mc.getEmail() == "[email protected]" )
mc2 = libsbml.ModelCreator(mc)
self.assert_( mc2.getFamilyName() == "Keating" )
self.assert_( mc2.getEmail() == "[email protected]" )
mc2 = None
mc = None
pass
def test_ModelHistory_assignmentOperator(self):
mh = libsbml.ModelHistory()
mc = libsbml.ModelCreator()
mc.setGivenName("Sarah")
mc.setFamilyName("Keating")
mc.setEmail("[email protected]")
mh.addCreator(mc)
mc = None
date = libsbml.Date(2005,12,30,12,15,45,1,2,0)
mh.setCreatedDate(date)
date = None
self.assert_( mh.getCreatedDate().getMonth() == 12 )
self.assert_( mh.getCreatedDate().getSecond() == 45 )
self.assert_( mh.getCreator(0).getFamilyName() == "Keating" )
mh2 = libsbml.ModelHistory()
mh2 = mh
self.assert_( mh2.getCreatedDate().getMonth() == 12 )
self.assert_( mh2.getCreatedDate().getSecond() == 45 )
self.assert_( mh2.getCreator(0).getFamilyName() == "Keating" )
mh2 = None
mh = None
pass
def test_ModelHistory_clone(self):
mh = libsbml.ModelHistory()
mc = libsbml.ModelCreator()
mc.setFamilyName("Keating")
mc.setGivenName("Sarah")
mc.setEmail("[email protected]")
mh.addCreator(mc)
mc = None
date = libsbml.Date(2005,12,30,12,15,45,1,2,0)
mh.setCreatedDate(date)
date = None
self.assert_( mh.getCreatedDate().getMonth() == 12 )
self.assert_( mh.getCreatedDate().getSecond() == 45 )
self.assert_( mh.getCreator(0).getFamilyName() == "Keating" )
mh2 = mh.clone()
self.assert_( mh2.getCreatedDate().getMonth() == 12 )
self.assert_( mh2.getCreatedDate().getSecond() == 45 )
self.assert_( mh2.getCreator(0).getFamilyName() == "Keating" )
mh2 = None
mh = None
pass
def test_ModelHistory_copyConstructor(self):
mh = libsbml.ModelHistory()
mc = libsbml.ModelCreator()
mc.setFamilyName("Keating")
mc.setGivenName("Sarah")
mc.setEmail("[email protected]")
mh.addCreator(mc)
mc = None
date = libsbml.Date(2005,12,30,12,15,45,1,2,0)
mh.setCreatedDate(date)
date = None
self.assert_( mh.getCreatedDate().getMonth() == 12 )
self.assert_( mh.getCreatedDate().getSecond() == 45 )
self.assert_( mh.getCreator(0).getFamilyName() == "Keating" )
mh2 = libsbml.ModelHistory(mh)
self.assert_( mh2.getCreatedDate().getMonth() == 12 )
self.assert_( mh2.getCreatedDate().getSecond() == 45 )
self.assert_( mh2.getCreator(0).getFamilyName() == "Keating" )
mh2 = None
mh = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestAnnotationCopyAndClone))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/annotation/TestAnnotationCopyAndClone.py
|
Python
|
bsd-3-clause
| 8,732
|
[
"VisIt"
] |
e543b2023c657f78798b85e2234328485e75546c3e8455b2f0a18f52034e61f7
|
import matplotlib
#matplotlib.use('WXAgg')
from matplotlib import pyplot as pl #this is Python's main scientific plotting library.
from matplotlib import cm
import cv2 #computer vision library. interfaces with python over numpy arrays.
import numpy as np
from scipy import stats
from sys import argv #for command line arguments
from os.path import splitext, basename, exists, sep #some file path handling
import os
from PIL import Image
from time import time
from datetime import timedelta
from itertools import chain
from glob import glob
from scipy.misc import imresize
from string import digits
import re
import subprocess
import MySQLdb as mdb
import types
import cPickle
try:
from scipy.spatial import Voronoi
from scipy.spatial.qhull import QhullError
vorflag=True
except ImportError:
vorflag=False
print "Voronoi module not found. Update your scipy version to >0.12 if possible. Proceeding without Voronoi tesselation features."
from scipy import optimize,stats
np.set_printoptions(precision=3, suppress=True)
digits = frozenset(digits)
rmin=9 #particle size limits for Hough transform. (which we don't really use any more)
rmax=30
COORDHEADER='#frame particle# blobsize x y split_blob? [reserved] sphericity\n'
class trajectory():
"""Single particle trajectory.
Attributes:
data: length x 5 array, columns frame #, particle #, x, y coordinates, size
opened: Boolean flag. Marks whether particle is still being tracked.
maxdist: maximum distance a particle travels between frames.
number: trajectory ID.
Methods:
findNeighbour: Finds the nearest neighbour in particle coordinate set from next frame."""
def __init__(self,data,number, maxdist=-1):
self.data=np.array(data) #trajectory coordinate series, lengthX3 array
self.opened=True #flag: trajectory considered lost when no particle closer than max. distance in next frame.
self.maxdist=maxdist #set maximum distance (this is replaced by actual particle diameter right after trajectory is initialised)
self.number=number #trajectory ID
self.lossCnt=0
def findNeighbour(self,nxt, frame, idx=0, lossmargin=10, spacing=1):
"""Finds the nearest neighbour in particle coordinate set from next frame.
Accepts next neighbour numpy array (2 x # particles, xy columns).
Extends data attribute array with new coordinates if successful, closes trajectory if not.
Returns next neighbour numpy array with matching particle removed for speedup and to avoid double counting."""
if frame-self.data[-1,0]>(lossmargin+1)*spacing: #if there are frame continuity gaps bigger than the loss tolerance, close trajectory!
self.opened=False
return nxt
if nxt.size>0:
dist=(self.data[-1,2]-nxt[:,0+idx])**2+(self.data[-1,3]-nxt[:,1+idx])**2
m=min(dist)
else:
m=self.maxdist+1 #this will lead to trajectory closure
print "no particles left in this frame", frame, self.number
if m<self.maxdist:
ind=(dist==m).nonzero()[0]
try:
self.data=np.vstack((self.data,np.array([[frame,ind, nxt[ind,idx],nxt[ind,idx+1], nxt[ind,idx-1]]]))) #append new coordinates to trajectory
except IndexError:
print "SOMETHING WRONG HERE!", self.data.shape, nxt.shape, frame, self.number #not sure what.
self.lossCnt+=1
if self.lossCnt>lossmargin:
self.opened=False #close trajectory, don't remove particle from coordinate array.
else:
predCoord=lin_traj(self.data[-lossmargin:,2],self.data[-lossmargin:,3])
if np.isnan(predCoord[0]): predCoord=self.data[-1][2:4]
self.data=np.vstack((self.data,np.array([[frame, -1, predCoord[0], predCoord[1], self.data[-1,-1]]])))
return nxt
self.lossCnt=0
return np.delete(nxt,ind,0) #remove particle and return coordinate set.
else:
self.lossCnt+=1
if self.lossCnt>lossmargin:
self.opened=False #close trajectory, don't remove particle from coordinate array.
else:
predCoord=lin_traj(self.data[-lossmargin:,2],self.data[-lossmargin:,3])
if np.isnan(predCoord[0]): predCoord=self.data[-1][2:4]
self.data=np.vstack((self.data,np.array([[frame, -1,predCoord[0], predCoord[1],self.data[-1,-1]]])))
return nxt
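# Illustrative usage sketch (added comment, not part of the original code): how
# findNeighbour() grows a trajectory frame by frame. Blob rows follow the
# COORDHEADER layout (frame, particle#, blobsize, x, y, ...), so idx=3 points at
# the x column; all names and numbers below are made up for demonstration.
#
# tr = trajectory(np.array([[0, 0, 10.0, 12.0, 25.0]]), number=1, maxdist=100.)
# blobs_frame1 = np.array([[1., 0., 24., 10.5, 12.4, 0., -1., 1.1],
#                          [1., 1., 30., 80.0, 90.0, 0., -1., 1.0]])
# remaining = tr.findNeighbour(blobs_frame1, 1, idx=3)
# # the nearest blob has been appended to tr.data and removed from 'remaining'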
class movie():
"""Class for handling 2D video microscopy data. Additionally requires mplayer in the PATH.
Argument: video filename.
Keyword parameters: TTAB concentration and background filename.
Attributes:
fname: filename (string)
trajectories: particle trajectory objects (dictionary, keys are particle IDs)
bg: background. boolean (False if no bg), filename (string), or numpy image array (1 channel greyscale)
shape: image dimensions. xy-tuple of 2 integers.
datadir: directory to hold trajectory data, background image, extraction parameters etc. Default: movie filename without extension.
TTAB: TTAB concentration.
kernel: diameter of the structuring ellipse used for image dilation/erosion. Value int, or False for no morphological operation.
threshold: binarisation threshold (int). Applied to greyscale image _after_ bg subtraction and contrast rescaling. Default 128 (mid grey).
blobsize: minimum/maximum 2d particle sizes for tracking. (int, int) tuple.
framelim: frame number boundaries for extraction. (int, int) tuple. Default either (0,1e8) or (0, max frame #).
Methods:
sqlCoords, extractCoords,testFrame, getFrame, loadBG, getBG, gotoFrame, getFrame, findTrajectories, plotMovie, loadTrajectories, stitchTrajectories, gridAnalysis
"""
def __init__(self,fname, TTAB=-1, bg=''):
"""Initialises movie object. Parameter video filename, keywords TTAB (surfactant concentration), path to extrapolated background file.
"""
self.typ="Particles"
self.fname=fname
self.trajectories={}
self.bg=False
self.datadir=splitext(fname)[0]+'-data'+sep
if bg!='':
if type(bg).__name__=='ndarray':
self.bg=bg
shape=bg.shape[:2]
if type(bg).__name__=='str':
try:
im=np.array(Image.open(bg))
shape=im.shape[:2]
if len(im.shape)==3: im=im[:,:,0]
self.bg=im
except: pass
self.TTAB=TTAB
if os.name=='posix': #this assumes you installed mplayer! We're also quite possibly doing the mplayer output multiple times. Better safe than sorry. TODO cleanup
result = subprocess.check_output(['mplayer','-vo','null','-ao','null','-identify','-frames','0',self.fname])
if os.name=='nt': #this assumes you installed mplayer and have the folder in your PATH!
result = subprocess.check_output(['mplayer.exe','-vo','null','-ao', 'null','-identify','-frames','0',self.fname])
try:
shape=(int(re.search('(?<=ID_VIDEO_WIDTH=)[0-9]+',result).group()),int(re.search('(?<=ID_VIDEO_HEIGHT=)[0-9]+',result).group()))
framerate=np.float(re.search('(?<=ID_VIDEO_FPS=)[0-9.]+',result).group())
frames=int(np.round(np.float(re.search('(?<=ID_LENGTH=)[0-9.]+',result).group())*framerate))
framelim=(0,frames)
except:
shape=(0,0)
framerate=0.
frames=0.
framelim=(0,1e8)
self.parameters={
'framerate':framerate, 'sphericity':-1.,'xscale':-1.0,'yscale':-1.0,'zscale':-1.0,#float
            'imsize':shape,'blobsize':(0,30),'crop':[0,0,shape[0],shape[1]], 'framelim':framelim, 'circle':[shape[0]/2, shape[1]/2, int(np.sqrt(shape[0]**2+shape[1]**2))],#tuples
'channel':0, 'blur':1, 'spacing':1, 'struct':1, 'threshold':128, 'frames':frames,'imgspacing':-1,'maxdist':1e4,'lossmargin':10, 'lenlim':1,#ints
'sizepreview':True, 'invert':False, 'diskfit':False, 'mask':True #bools
}
def readParas(self, fname='paras.txt'):
#self.parameters={}
with open(self.datadir+fname) as f:
text=f.read()
text=text.split('\n')
for t in text:
t=t.split(': ')
if t[0].strip() in ['struct','threshold','frames', 'channel','blur','spacing','imgspacing','maxdist','lossmargin','lenlim']:#integer parameters
self.parameters[t[0]]=int(t[1])
if t[0].strip() in ['blobsize','imsize', 'crop','framelim', 'circle']:#tuple parameters
tsplit=re.sub('[\s\[\]\(\)]','',t[1]).split(',')
self.parameters[t[0]]=tuple([int(it) for it in tsplit])
if t[0].strip() in ['framerate','sphericity','xscale','yscale','zscale']:#float parameters
self.parameters[t[0]]=float(t[1])
if t[0].strip() in ['sizepreview','mask','diskfit','invert']:#boolean parameters
self.parameters[t[0]]=str_to_bool(t[1])
if self.parameters['struct']>1: self.kernel= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.parameters['struct'],self.parameters['struct']))
else: self.kernel=False
def sqlCoords(self,dbname,csvfile):
"""Connects to a SQL database and dumps the particle coordinate data into a table. Logs on as """
db = mdb.connect(host="localhost", user="cmaass",passwd="swimmers", local_infile=True)
cur=db.cursor()
try:
cur.execute('CREATE DATABASE IF NOT EXISTS %s;'%dbname)
cur.execute('USE %s;'%dbname)
cur.execute('DROP TABLE IF EXISTS coords;')
cur.execute('create table coords(frame INT, id INT, size INT, x DOUBLE, y DOUBLE, usg INT);')
cur.execute("LOAD DATA LOCAL INFILE '%s' INTO TABLE coords FIELDS TERMINATED BY ' ' LINES TERMINATED BY '\n';"%csvfile)
print 'done'
except:
cur.close()
db.close()
raise
cur.close()
db.close()
def extractCoords(self,framelim=False, blobsize=False, threshold=False, kernel=False, delete=False, mask=False, channel=0, sphericity=-1, diskfit=True, blur=1,crop=False, invert=False):
if not framelim: framelim=self.parameters['framelim']
if not blobsize: blobsize=self.parameters['blobsize']
if not threshold: threshold=self.parameters['threshold']
if type(kernel).__name__!='ndarray': kernel=np.array([1]).astype(np.uint8)
if type(mask).__name__=='str':
try:
im=np.array(Image.open(mask))
if len(im.shape)==3: im=im[:,:,channel]
mask=(im>0).astype(float)
except: mask=False
tInit=time()
success=True #VideoCapture read method returns False when running out of frames.
mov=cv2.VideoCapture(self.fname) #open movie (works on both live feed and saved movies)
framenum=framelim[0]
if framenum>1: dum,my,p=self.gotoFrame(mov,framenum-1)
if not exists(self.datadir):
os.mkdir(self.datadir)
if type(self.bg).__name__=='ndarray':
bgtrue=True
bg=self.bg.astype(float)
if blur>1: bg=cv2.GaussianBlur(bg,(blur,blur),0)
else: bgtrue=False
try: os.remove(self.datadir+'temp')
except OSError: pass
dumpfile=open(self.datadir+'temp','a')
allblobs=np.array([]).reshape(0,8)
dumpfile.write(COORDHEADER)
counter=0
while success and framenum<framelim[1]: #loop through frames
framenum+=1
if framenum%200==0:
print 'frame',framenum, 'time', str(timedelta(seconds=time()-tInit)), '# particles', counter #progress marker
np.savetxt(dumpfile,allblobs,fmt="%.2f")
allblobs=np.array([]).reshape(0,8)
success,image=mov.read()
if success:
im=image[:,:,channel].astype(float)
if blur>1: im=cv2.GaussianBlur(im,(blur,blur),0)
if bgtrue:
im-=bg
if type(mask).__name__=='ndarray':
im=im*mask
im=mxContr(im) #TODO: this might be a few rescalings too many. try to make this simpler, but make it work first
thresh=mxContr((im<threshold).astype(int))
if type(kernel).__name__=='ndarray': thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
if np.amax(thresh)!=np.amin(thresh): blobs=extract_blobs(thresh,framenum,sphericity=sphericity,blobsize=blobsize,diskfit=diskfit)
else: blobs=np.array([]).reshape(0,8)
counter=blobs.shape[0]
try: allblobs=np.vstack((allblobs,blobs))
except ValueError:
pass
#print "Value Error!", allblobs.shape, blobs.shape
np.savetxt(dumpfile,allblobs,fmt="%.2f")
dumpfile.close()
with open(self.datadir+'temp','r') as f: tempdata=f.read()[:-1]
with open(self.datadir+'temp','w') as f: f.write(tempdata)
def getFrame(self,framenum):
"""Retrieves frame of number framenum from open movie. Returns numpy array image, or False if unsuccessful.
Due to movie search/keyframe issues, framenumber might not be exact."""
mov=cv2.VideoCapture(self.fname)
if framenum>1: s,r,p=self.gotoFrame(mov,framenum-1)
success,image=mov.read()
if success: return image
else: return False
def loadBG(self, filename=''):
if filename=="": filename=self.datadir+'bg.png'
self.bg=np.array(Image.open(filename))
def getBGold(self, num=50, spac=50, prerun=1000, cutoff=100, save=False, channel=0):
mov=cv2.VideoCapture(self.fname)
#loop through to start frame
if prerun>1:
s,r,p=self.gotoFrame(mov,prerun-1)
print "target frame reached..."
else: s,r=mov.read()
tInit=time()
print "Extracting images..."
bgs=r
print r.shape
if len(bgs.shape)==3:
rgb=True
bgs=bgs[:,:,channel]
else:rgb=False
for i in range(num*spac):
success,r=mov.read()
            #we decimate by 'spac' to get more even statistics. It would be nice if opencv had a way to skip frames!
if i%spac==0 and success:
if rgb: r=r[:,:,channel]
bgs=np.dstack((bgs,r.astype(int)))
print "Elapsed time %.1f seconds.\n Averaging..."%(time()-tInit), "shape: ", bgs.shape
# initialise averaged images
bg=np.empty(bgs.shape[:2])
for i in range(bgs.shape[0]):
for j in range(bgs.shape[1]):
#for each pixel (don't know how to do this more elegantly than loops over x and y):
#treshold to filter out dark (moving) structures
#assign most common (argmax) pixel colour to averaged image (this could be more sophisticated, but I don't really care)
f=bgs[i,j,:]
g=f[f>cutoff]
if len(g)>0: bg.itemset((i,j),np.argmax(np.bincount(g.astype(int))))
                # if this is a dark image area anyway, just take the pixel average and hope it's outside the ROI (like the cell walls)
else: bg.itemset((i,j), np.mean(f))
print "Elapsed time %.1f seconds.\n Done."%(time()-tInit)
mov.release()
if not exists(self.datadir):
os.mkdir(self.datadir)
if save: Image.fromarray(bg.astype(np.uint8)).save(self.datadir+'bg.png')
self.bg=bg
return bg
def getBG(self, num=50, spac=50, prerun=1000, rng=100, save=False, channel=0):
mov=cv2.VideoCapture(self.fname)
#loop through to start frame
if prerun>1:
s,r,p=self.gotoFrame(mov,prerun-1)
print "target frame reached..."
else: s,r=mov.read()
tInit=time()
print "Extracting images..."
bgs=r
print r.shape
if len(bgs.shape)==3:
rgb=True
bgs=bgs[:,:,channel]
else:rgb=False
for i in range(num*spac):
success,r=mov.read()
#we decimate by 'spac' to get more even statistics. It would be nice if opencv had a way to skip frames! (better than gotoFrame, anyway)
if i%spac==0 and success:
if rgb: r=r[:,:,channel]
bgs=np.dstack((bgs,r.astype(int)))
print "Elapsed time %s.\n Averaging..."%str(timedelta(seconds=time()-tInit)), "shape: ", bgs.shape
# initialise averaged images
bg=np.empty(bgs.shape[:2])
for i in range(bgs.shape[0]):
for j in range(bgs.shape[1]):
#for each pixel (don't know how to do this more elegantly than loops over x and y):
                #threshold to filter out dark (moving) structures
                #assign most common (argmax) pixel colour to averaged image (this could be more sophisticated, but I don't really care)
f=bgs[i,j,:]
if hasattr(rng, "__len__"): g=f[np.logical_and(rng[0]<f, f<rng[1])]
else: g=f[f>rng]
if len(g)>0: bg.itemset((i,j),np.argmax(np.bincount(g.astype(int))))
# if this is a dark image area anyway, just take the pixel average and hope it's outside the ROI (like the cell walls)
else: bg.itemset((i,j), np.mean(f))
print "Elapsed time %s.\n Done."%str(timedelta(seconds=time()-tInit))
mov.release()
if not exists(self.datadir):
os.mkdir(self.datadir)
if save: Image.fromarray(bg.astype(np.uint8)).save(self.datadir+'bg.png')
self.bg=bg
return bg
def gotoFrame(self,mov,position, channel=0):
positiontoset = position
pos = -1
success=True
mov.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, position)
while pos < position:
success, image = mov.read()
pos = mov.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
print pos
if pos == position:
mov.release()
return success,image,pos
if pos > position:
positiontoset -= 1
mov.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, positiontoset)
pos = -1
if success: return success, image,pos
else: return success, None,-1
def getFrames(self,position, length=1, channel=0,spacing=1):
mov=cv2.VideoCapture(self.fname)
if position>0: success,images,p=self.gotoFrame(mov,position)
else: success,images=mov.read()
images=images[:,:,channel]
        for i in range((length-1)*spacing):
s,im=mov.read()
if i%spacing==0:
images=np.dstack((images, im[:,:,channel]))
mov.release()
return images
def CoordtoTraj(self, tempfile='temp',lenlim=-1, delete=True, breakind=1e9, maxdist=-1, lossmargin=-1, spacing=1, idx=3, consolidate=False):#TODO Adjust for frame jumps!!!
t0=time()
if delete:
for f in glob(self.datadir+'trajectory*.txt'): os.remove(f)
if tempfile=='temp':tempfile=self.datadir+'temp' #TODO: 'coords.txt'!!!
if tempfile=='coords.txt':tempfile=self.datadir+'coords.txt'
if maxdist<0: maxdist=self.parameters['maxdist']
if lossmargin<0: lossmargin=self.parameters['lossmargin'] #if not set, take parameter file value
if lenlim<0: lenlim=self.parameters['lenlim']
#the 'consolidate' flag produces an average coordinate/frame number and 3D size (voxel) for each closed trajectory and writes them into a single coordinate file to get z stack tracing. A second tracking run will then generate time tracking. No single trajectory output.
if consolidate:
np.set_printoptions(precision=3,suppress=True)
if type(consolidate) is not str: stckf=open('stackcoord.txt','w')
else: stckf=open(consolidate,'w')
stckf.write(COORDHEADER)
print """
maxdist: %f
lossmargin: %d
lenlim: %d
"""%(maxdist, lossmargin,lenlim)
dataArr=np.loadtxt(tempfile)
trajectorycount=0
frames=sorted(list(set(dataArr[:,0])))
#put in frame range here!
activetrajectories={}
for i in range(1,len(frames)):
try: arrInd=np.searchsorted(dataArr[:,0], frames[i])
except IndexError: break
blobs,dataArr=np.split(dataArr, [arrInd])
if frames[i]%400==0:
print "framenum", frames[i], 'remaining data', dataArr.shape, 'active traj.', len(activetrajectories), 'time', time()-t0
if frames[i]>breakind:
breakind=1e9
print "break here?"
for tr in activetrajectories.values():
blobs=tr.findNeighbour(blobs, frames[i], idx=idx, lossmargin=lossmargin) #for each open trajectory, find corresponding particle in circle set
if not tr.opened: #if a trajectory is closed in the process (no nearest neighbour found), move to closed trajectories.
if tr.data.shape[0]>lenlim:
if not consolidate:
np.savetxt(self.datadir+'trajectory%06d.txt'%tr.number, tr.data, fmt='%.2f', header="frame particle# x y area")
print "closed trajectory: ", tr.number, tr.maxdist
else:
trmean=list(np.mean(tr.data,axis=0))
trmean[-1]=tr.data.shape[0]*trmean[-1]
#reshuffle to get proper file format
trmean=np.array(trmean[:2]+trmean[-1:]+trmean[2:4]+[0.,0.,1.])
stckf.write(str(trmean)[1:-1].strip()+'\n')
del activetrajectories[tr.number]
for blob in blobs: #if any circles are left in the set, open a new trajectory for each of them
trajectorycount+=1
activetrajectories[trajectorycount]=trajectory(np.array([[frames[i],blob[1],blob[3],blob[4],blob[2]]]),trajectorycount, maxdist=maxdist)
print "trajectories:", len(activetrajectories)
for tr in activetrajectories.values():
if not consolidate:
np.savetxt(self.datadir+'trajectory%06d.txt'%tr.number, tr.data, fmt='%.2f', header="frame particle# x y area")
print "closed trajectory: ",tr.number, np.sqrt(tr.maxdist)
else:
trmean=list(np.mean(tr.data,axis=0))
trmean[-1]=tr.data.shape[0]*trmean[-1]
trmean=np.array(trmean[:2]+trmean[-1:]+trmean[2:4]+[0.,0.,1.])
stckf.write(str(trmean)[1:-1].strip()+'\n')
try: stckf.close()
except: pass
def findTrajectories(self,framelim=False, blobsize=False,lenlim=-1, threshold=False, kernel=False, delete=False, invert=False, mask=False, channel=0, sphericity=-1., outpSpac=200, diskfit=True, idx=3):
if not framelim: framelim=self.parameters['framelim']
if not blobsize: blobsize=self.parameters['blobsize']
if not threshold: threshold=self.parameters['threshold']
if self.parameters['maxdist']<0: maxdist=self.parameters['maxdist']
else: maxdist=self.parameters['maxdist']
if lenlim<0: lenlim=self.parameters['lenlim']
if type(kernel).__name__!='ndarray': kernel=np.array([1]).astype(np.uint8)
if type(mask).__name__=='str':
try:
im=np.array(Image.open(mask))
if len(im.shape)==3: im=im[:,:,channel]
mask=(im>0).astype(float)
except: mask=False
tInit=time()
success=True #VideoCapture read method returns False when running out of frames.
mov=cv2.VideoCapture(self.fname) #open movie (works on both live feed and saved movies)
activetrajectories={} #dictionary to hold open trajectories
framenum=framelim[0]
if framenum>1: dum,my,p=self.gotoFrame(mov,framenum-1)
trajectorycount=0 #keeps track of trajectory IDs
if not exists(self.datadir):
os.mkdir(self.datadir)
if delete:
for f in glob(self.datadir+'trajectory*.txt'): os.remove(f)
if type(self.bg).__name__=='ndarray':
bgtrue=True
bg=self.bg.astype(float)
else: bgtrue=False
while success and framenum<framelim[1]: #loop through frames
framenum+=1
if framenum%outpSpac==0: print 'frame',framenum, 'time', str(timedelta(seconds=time()-tInit)), 'open trajectories', len(activetrajectories) #progress marker
success,image=mov.read()
if success:
if bgtrue:
im=image[:,:,channel] -bg
else:
im=image[:,:,channel].astype(float)
im=mxContr(im) #TODO: this might be a few rescalings too many. try to make this simpler, but make it work first
if type(mask).__name__=='ndarray':
im=im*mask
thresh=mxContr((im<threshold).astype(int))
if type(kernel).__name__=='ndarray': thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
if invert: thresh=255-thresh
if np.amax(thresh)!=np.amin(thresh): blobs=extract_blobs(thresh,framenum,blobsize=blobsize,sphericity=sphericity, outpSpac=outpSpac, diskfit=diskfit)
else: blobs=np.array([]).reshape(0,8)
if framenum>framelim[0]:
for tr in activetrajectories.values():
blobs=tr.findNeighbour(blobs, framenum, idx=idx) #for each open trajectory, find corresponding particle in circle set
if not tr.opened: #if a trajectory is closed in the process (no nearest neighbour found), move to closed trajectories.
if tr.data.shape[0]>lenlim:
np.savetxt(self.datadir+'trajectory%06d.txt'%tr.number, tr.data, fmt='%.2f', header="frame particle# x y area")
print "closed trajectory: ", tr.number, tr.maxdist
del activetrajectories[tr.number]
for blob in blobs: #if any circles are left in the set, open a new trajectory for each of them
trajectorycount+=1
activetrajectories[trajectorycount]=trajectory(np.array([[framenum,blob[3],blob[4], blob[2]]]),trajectorycount, maxdist=maxdist)
#activetrajectories[trajectorycount].maxdist=5*np.sqrt(blob[4]/np.pi) #initialise maximum allowed nearest neighbour distance = particle diameter
self.trajectories=activetrajectories
print "trajectories:", len(activetrajectories)
for tr in self.trajectories.values():
#if tr.data.shape[0]>lenlim:
np.savetxt(self.datadir+'trajectory%06d.txt'%tr.number, tr.data, fmt='%.2f', header="frame particle# x y area")
print "closed trajectory: ",tr.number, tr.maxdist
mov.release()
def plotMovie(self, outname=None, decim=10,scale=2, crop=[0,0,0,0], mask='trajectory',frate=10, cmap=cm.jet, bounds=(0,1e8), tr=True, channel=0, lenlim=1):
"""crop values: [left, bottom, right, top]"""
if not outname: outname=self.datadir+basename(self.fname)[:-4]+'-traced.avi'
mov=cv2.VideoCapture(self.fname)
success,image=mov.read()
if crop[2]==0: crop[2]=-image.shape[0]
if crop[3]==0: crop[3]=-image.shape[1]
test=image[:,:,channel]
image=np.dstack((test,test,test))
test=test.copy()[crop[0]:-crop[2],crop[1]:-crop[3]]
print test.shape
print crop
size=(int(test.shape[0]/scale),int(test.shape[1]/scale))
print size
out=cv2.VideoWriter(outname,cv2.cv.CV_FOURCC('D','I','V','X'),frate,(size[1],size[0]))
count=0.
trajectories=[]
if tr:
for ob in glob(self.datadir+mask+'*.txt'):
tr=np.loadtxt(ob)
#tr[:,0]=np.around(tr[:,0]/tr[0,0])
if tr.shape[0]>lenlim:
try:
minNaN=np.min(np.isnan(np.sum(tr[:,2:4], axis=1)).nonzero()[0])
print 'NaN', ob, minNaN
except:
minNaN=tr.shape[0]
trajectories+=[tr[:minNaN,:]]
print '# of trajectories', len(trajectories)
while success:
if (bounds[0] <= count <= bounds[1]) and count%decim==0:
for i in range(len(trajectories)):
if trajectories[i][-1,0]<count:
pts = trajectories[i][:,2:4].astype(np.int32) #check indices and shape!!!
colour=tuple([int(255*r) for r in cmap(np.float(i)/len(trajectories))[:3]])[::-1]
#colour=(0,120,0)
cv2.polylines(image,[pts],isClosed=False,color=colour,thickness=int(np.round(scale)))
else:
w=(trajectories[i][:,0]==count).nonzero()[0]
if len(w)>0:
pts = trajectories[i][:w[0],2:4].astype(np.int32) #check indices and shape!!!
colour=tuple([int(255*r) for r in cmap(np.float(i)/len(trajectories))[:3]])[::-1]
cv2.polylines(image,[pts],isClosed=False,color=colour,thickness=int(np.round(scale)))
image=image[crop[0]:-crop[2],crop[1]:-crop[3]]
outim=imresize(image,1./scale)
if count%min(self.parameters['framelim'][1]/10,1000)==0: Image.fromarray(outim).save(self.datadir+'testim%06d.png'%count)
out.write(outim[:,:,::-1])
success,image=mov.read()
if success:
image=image[:,:,channel]
image=np.dstack((image,image,image))
count+=1
if count%min(self.parameters['framelim'][1]/10,1000)==0: print count
if count > bounds[1]:
success=False
Image.fromarray(outim).save(self.datadir+'testim%06d.png'%count)
mov.release()
#out.release()
def loadTrajectories(self, directory=None, mask='trajectory*.txt'):
self.trajectories={}
print directory
if not directory: directory=self.datadir
trajectoryfiles=glob(directory+mask)
print trajectoryfiles
for tr in trajectoryfiles:
data=np.loadtxt(tr)
num=int(''.join(c for c in basename(tr) if c in digits))
self.trajectories[num]=trajectory(data,num)
def plotTrajectories(self,outname='plottrajectories.png', ntrajectories=-1, lenlimit=-1, mpl=False, text=False, idx=2, cmap=False):
f0=self.getFrames(0, 1)
print 'mpl', mpl
if mpl:
f0=f0.astype(float)
if len(f0.shape)==3: f0=f0[:,:,0]
pl.imshow(f0, cmap=cm.gray)
else:
f0=np.dstack((f0,f0,f0))
keys=self.trajectories.keys()
if ntrajectories>0: trmax=ntrajectories
else: trmax=len(keys)
for i in range(trmax):
tr=self.trajectories[keys[i]]
cl=cm.jet(i/np.float(len(keys)))
if tr.data.shape[0]>lenlimit and len(tr.data.shape)>1:
print tr.number, tr.data.shape
if mpl:
pl.plot(tr.data[:,idx],tr.data[:,idx+1],lw=.3, color=cl)
if text: pl.text(tr.data[0,idx], tr.data[0,idx+1], str(tr.number), color=cl,fontsize=6)
else:
                    cv2.polylines(f0,[tr.data[:,idx:idx+2].astype(np.int32)],isClosed=False,color=cl,thickness=2)
if mpl:
pl.axis('off')
pl.savefig(self.datadir+outname,dpi=600)
pl.close('all')
else:
Image.fromarray(f0).save(self.datadir+outname)
def plotTrajectory(self, num): #note xy-indices changed! Rerun analysis if trajectories look strange!
self.loadTrajectories()
traj=self.trajectories[num]
try: f0=self.getFrames(int(traj.data[-1,0]), 1)
except: f0=self.getFrames(int(traj.data[-1,0])-1, 1)
f0=np.dstack((f0,f0,f0))
print f0.shape
cv2.polylines(f0,[traj.data[:,2:4].astype(np.int32)], False, (255,0,0), 2)
Image.fromarray(f0.astype(np.uint8)).save(self.datadir+"trajPlot%06d-frame%06d.jpg"%(num, traj.data[-1,0]))
def stitchTrajectories(self, maxdist, maxtime,timelim=50, save=False):#TODO: fix for particle indices!!!
tInit=time()
if self.trajectories=={}:
print "please load trajectories first (self.loadTrajectories())!"
else:
trBegEnd=np.array([-1.]*7)
            nums=sorted(self.trajectories.keys())
for n in nums:
tr=self.trajectories[n]
trBegEnd=np.vstack((trBegEnd,np.array([tr.number]+list(tr.data[0,:])+list(tr.data[-1,:]))))
trBegEnd=trBegEnd[1:,:]
tMin,tMax=min(trBegEnd[:,1]),max(trBegEnd[:,1])
#remove
incompl=np.any(trBegEnd[:,[1,4]]!=[tMin,tMax],axis=1)
trBegEnd=trBegEnd[incompl,:]
print 'tmin',tMin,', tmax', tMax,'; number of trajectories', len(nums),', of which incomplete', np.sum(incompl)
precnum,folnum=-1,-1
while len(trBegEnd)>0 and time()-tInit<timelim:
#remove first line
no1st=trBegEnd[1:,:]
#print trBegEnd[0,0], 'test 0'
if trBegEnd[0,1]>tMin:
thisnum=trBegEnd[0,0]
#print 'test 1'
delT=trBegEnd[0,1]-no1st[:,4]
delRsq= (trBegEnd[0,2]-no1st[:,5])**2+(trBegEnd[0,3]-no1st[:,6])**2
#select all that match min < maxtime and dist < maxdist
                    candidates=np.all([delT>0, delT<maxtime, delRsq<maxdist**2],axis=0)
if np.sum(candidates)>0:
precnum=int(no1st[candidates,0][delRsq[candidates]==min(delRsq[candidates])][0])
precdata=self.trajectories[precnum].data
else: precdata=np.zeros((0,3))
delT=no1st[:,1]-trBegEnd[0,4]
delRsq= (no1st[:,2]-trBegEnd[0,5])**2+(no1st[:,3]-trBegEnd[0,6])**2
                    candidates=np.all([delT>0, delT<maxtime, delRsq<maxdist**2],axis=0)
if np.sum(candidates)>0:
folnum=int(no1st[candidates,0][delRsq[candidates]==min(delRsq[candidates])][0])
foldata=self.trajectories[folnum].data
else: foldata=np.zeros((0,3))
if precnum+folnum>-2:
print 'stitched trajectories %d,%d and %d, remaining trajectories %d'%(precnum,thisnum,folnum, len(no1st[:,0]))
newnum=max(nums)+1
nums=np.append(nums,newnum)
self.trajectories[newnum]=trajectory(np.vstack((precdata,self.trajectories[thisnum].data,foldata)),newnum)
if precnum>0: del self.trajectories[precnum]
if folnum>0: del self.trajectories[folnum]
del self.trajectories[thisnum]
no1st=no1st[no1st[:,0]!=precnum,:]
no1st=no1st[no1st[:,0]!=folnum,:]
trBegEnd=no1st
precnum,folnum=-1,-1
if save:
for tr in self.trajectories.values():
np.savetxt(save+'%06d.txt'%(tr.number),tr.data,fmt='%.03f', header="frame particle# x y area")
def Histogram(self, fnum, fname="temphist.png", channel=0):
"""plots the RGB histogram for frame # fnum. Auxiliary function for remote parameter setting. Replaces HistoWin in parameter GUI."""
image=self.getFrame(fnum)
if type(image).__name__=='ndarray':
            if len(image.shape)==3 and image.shape[2]!=3: image=np.dstack((image[:,:,channel],image[:,:,channel],image[:,:,channel]))
pl.figure(figsize=[6,4])
for i in range(3):
pl.hist(image[:,:,i].flatten(), bins=256, log=True, histtype='step',align='mid',color='rgb'[i])
pl.savefig(fname,dpi=100,format='png')
pl.close("all")
def testImages(self,fnum, mask=False,BGrng=(100,255), channel=0):
"""analogue for ImgDisplay in parametergui.py"""
if type(mask).__name__=='str':
try:
im=np.array(Image.open(self.datadir+'mask.png'))
if len(im.shape)==3: im=im[:,:,channel]
mask=(im>0).astype(float)
            except: mask=np.zeros(self.parameters['imsize'][::-1])+1.
        else: mask=np.zeros(self.parameters['imsize'][::-1])+1.
if type(self.bg).__name__!='ndarray':
if os.path.exists(self.datadir+'bg.png'):
self.loadBG()
else:
bg=self.getBG(rng=BGrng, num=50, spac=int(self.parameters['frames']/51), prerun=100, save=True)
image=self.getFrame(fnum)
orig=image.copy()
Image.fromarray(orig.astype(np.uint8)).save(self.datadir+'orig.png')
if len(image.shape)>2: image=image[:,:,channel]
bgsub=image.astype(float)-self.bg
bgsub=mxContr(bgsub)*mask
Image.fromarray(bgsub.astype(np.uint8)).save(self.datadir+'bgsub.png')
thresh=mxContr((bgsub<self.parameters['threshold']).astype(int))
if self.parameters['struct']>0:
self.kernel= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.parameters['struct'],self.parameters['struct']))
thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
thresh=thresh*mask
Image.fromarray(thresh.astype(np.uint8)).save(self.datadir+'thresh.png')
contours, hierarchy=cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cont=[]
particles=orig.copy()
for i in range(len(contours)):
M=cv2.moments(contours[i])
area= M['m00']
if self.parameters['blobsize'][0]< area < self.parameters['blobsize'][1]:
cont+=[contours[i]]
cv2.fillPoly(particles,cont,color=(255,120,0))
Image.fromarray(particles.astype(np.uint8)).save(self.datadir+'particles.png')
def gridAnalysis(self, rspacing=10, tspacing=10, trajectory=False, corr=100, plot=True):
#to make things tractable, use curvature spacing
#make zero array, dims imagedims/spacing, x movielength/timespacing
#
t1=time()
if self.trajectories!={}:
if not trajectory:
alldata=np.vstack([tr.data[tspacing/2::tspacing,:] for tr in self.trajectories.values()])
bins=(np.arange(self.parameters['frames']/tspacing+1)*tspacing,np.arange(self.parameters['imsize'][0]/rspacing+1)*rspacing,np.arange(self.parameters['imsize'][1]/rspacing+1)*rspacing)
try:
hist=np.histogramdd(alldata, bins)[0]
tcorr=[]
t1=time()
for i in range(corr):
tcorr+=[np.mean((hist*np.roll(hist,-i, axis=0))[:-i])]
if i%20==0: print i,str(timedelta(seconds=time()-t1))
                    np.savez_compressed(self.datadir+'gridanalysis',np.array(tcorr),hist,bins[0],bins[1],bins[2])
                    np.savetxt(self.datadir+'crossing.txt', np.array(tcorr), fmt='%.5e')
return np.array(tcorr), hist,bins
except MemoryError:
print "Excessive histogram size! Please increase granularity!"
                    return np.zeros((0)),np.zeros((0,0,0)), (np.zeros((0)),np.zeros((0)),np.zeros((0)))
else:
alldata=np.vstack([tr.data[tspacing/2::tspacing,:] for tr in self.trajectories.values() if tr.number != trajectory])#time subset corresponding to trajectory length
thistrajectory=self.trajectories[trajectory]
bins=(np.arange(self.parameters['frames']/tspacing+1)*tspacing,np.arange(self.parameters['imsize'][0]/rspacing+1)*rspacing,np.arange(self.parameters['imsize'][1]/rspacing+1)*rspacing)
try:
hist=np.histogramdd(alldata, bins)[0]
tcorr=[]
trtimes=thistrajectory.data[tspacing/2::tspacing,0]/np.float(tspacing)
for i in range(corr):
tcorr+=[np.mean((hist*np.roll(hist,-i, axis=0))[:-i])]
if i%20==0: print i,str(timedelta(seconds=time()-t1))
                    np.savez_compressed(self.datadir+'gridanalysis',np.array(tcorr),hist,bins[0],bins[1],bins[2])
                    np.savetxt(self.datadir+'crossing.txt', np.array(tcorr), fmt='%.5e')
return np.array(tcorr), hist,bins
except MemoryError:
print "Excessive histogram size! Please increase granularity!"
                    return np.zeros((0)),np.zeros((0,0,0)), (np.zeros((0)),np.zeros((0)),np.zeros((0)))
else:
print "no trajectories found!"
            return np.zeros((0,0,0))
def maze(self, ROI1, ROI2, rootdir='', fmask='trajector*.txt', destination='cut', idx=2):
self.mazetraj={}
if rootdir=='': rootdir=self.datadir
fmask=rootdir+fmask
files=sorted(glob(fmask))
destination=rootdir+destination
print destination, os.path.exists(destination)
if not os.path.exists(destination): os.mkdir(destination)
tlist=[]
for f in files:
data=np.loadtxt(f)
num=int(''.join(c for c in basename(f) if c in digits))
try:
ind_in=np.argmax((data[:,idx]>ROI1[0])*(data[:,idx]<ROI1[2])*(data[:,idx+1]>ROI1[1])*(data[:,idx+1]<ROI1[3]))
ind_out=np.argmax((data[:,idx]>ROI2[0])*(data[:,idx]<ROI2[2])*(data[:,idx+1]>ROI2[1])*(data[:,idx+1]<ROI2[3]))
if ind_out*ind_in>0:
duration=ind_out-ind_in
dist=sum(np.sqrt((np.roll(data[ind_in:ind_out,1],-1)-data[ind_in:ind_out,1])**2+(np.roll(data[ind_in:ind_out,2],-1)-data[ind_in:ind_out,2])**2)[:-1])
tlist+=[[data[ind_in,0],duration,dist]]
np.savetxt(destination+os.sep+os.path.basename(f)[:-4]+'-maze.txt', data[ind_in:ind_out,:],fmt="%.2f")
self.mazetraj[num]=data[ind_in:ind_out,:]
except:
raise
print f, 'error!'
return np.array(tlist)
class clusterMovie(movie):
def __init__(self,fname, TTAB=-1, bg=''):
movie.__init__(self,fname,TTAB=TTAB,bg=bg)
self.typ="Clusters"
self.bg=False
def getClusters(self,thresh=128,gkern=61,clsize=(1,1e5),channel=0,rng=(1,1e8),spacing=100, maskfile='', circ=[0,0,1e4], imgspacing=-1):
print 'thresh', thresh, 'gkern',gkern, 'clsize', clsize, 'channel', channel, 'rng', rng, 'spacing', spacing, 'mask', maskfile, 'circle',circ
t0=time()
if os.path.exists(maskfile):
mask=np.array(Image.open(maskfile))[:,:,0]
mask=(mask>0).astype(float)
else:
            mask=np.zeros(self.parameters['imsize'][::-1])+1.
mov=cv2.VideoCapture(self.fname)
framenum=rng[0]
if rng[0]>1:
success,image,p= self.gotoFrame(mov,rng[0]-1)
gkern=int(gkern)
if gkern%2==0:
print "warning: Gaussian kernel size has to be odd. New size %d."%(gkern+1)
            gkern=gkern+1
allblobs=np.empty((0,6))
success=True
while success and framenum<rng[1]:
framenum+=1
if framenum%500==0: print framenum, time()-t0, allblobs.shape
success,image=mov.read()
if not success: break
if framenum%spacing==0:
if imgspacing!=-1:
vorIm=image.copy()
clustIm=image.copy()
image=image[:,:,channel]
blurIm=(mxContr(image)*mask+255*(1-mask))
blurIm=cv2.GaussianBlur(blurIm,(gkern,gkern),0)
threshIm=mxContr((blurIm<thresh).astype(int))
if framenum==100:
Image.fromarray(threshIm).save(self.datadir+'thresh.png')
Image.fromarray(mxContr(blurIm).astype(np.uint8)).save(self.datadir+'blur.png')
Image.fromarray(image).save(self.datadir+'orig.png')
cnt,hier=cv2.findContours(threshIm,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
blobs=np.empty((0,6))
count=0
if framenum%(spacing*imgspacing)==0 and imgspacing>0:
savecnt=[]
for c in range(len(cnt)):
mnt=cv2.moments(cnt[c])
if clsize[0]<mnt['m00']<clsize[1]:
count+=1
blobs=np.vstack((blobs,np.array([framenum,count,mnt['m00'], mnt['m10']/mnt['m00'], mnt['m01']/mnt['m00'],-1])))
if framenum%(spacing*imgspacing)==0 and imgspacing>0:
savecnt+=[cnt[c]]
if vorflag and blobs.shape[0]>1 and circ[0]!=0:
try:
newpoints=[]
vor=Voronoi(blobs[:,3:5])
dists=np.sum((vor.vertices-np.array(circ[:2]))**2,axis=1)-circ[2]**2
#extinds=[-1]+(dists>0).nonzero()[0]
for i in range(blobs.shape[0]):
r=vor.regions[vor.point_region[i]]
newpoints+=[circle_invert(blobs[i,3:5],circ, integ=True)]
pts=np.vstack((blobs[:,3:5],np.array(newpoints)))
vor=Voronoi(pts)
for i in range(blobs.shape[0]):
r=vor.regions[vor.point_region[i]]
if -1 not in r:
blobs[i,-1]=PolygonArea(vor.vertices[r])
if framenum%(spacing*imgspacing)==0 and imgspacing>0:
col=tuple([int(255*c) for c in cm.jet(i*255/blobs.shape[0])])[:3]
cv2.polylines(vorIm, [(vor.vertices[r]).astype(np.int32)], True, col[:3], 2)
cv2.circle(vorIm, (int(blobs[i,3]),int(blobs[i,4])),5,(255,0,0),-1)
if framenum%(spacing*imgspacing)==0 and imgspacing>0:
cv2.circle(vorIm, (int(circ[0]),int(circ[1])), int(circ[2]),(0,0,255),2)
Image.fromarray(vorIm).save(self.datadir+'vorIm%05d.jpg'%framenum)
except QhullError:
print "Voronoi construction failed!"
if framenum%(spacing*imgspacing)==0 and imgspacing>0:
count = 0
for b in range(len(blobs)):
cv2.putText(clustIm,str(count), (int(blobs[count,3]),int(blobs[count,4])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0),2)
count +=1
cv2.drawContours(clustIm,[savecnt[b]],-1,(0,255,120),2)
Image.fromarray(clustIm).save(self.datadir+'clustIm%05d.jpg'%framenum)
allblobs=np.vstack((allblobs,blobs))
np.savetxt(self.datadir+'clusters.txt',allblobs,fmt="%.2f", header="framenum cluster# area x y voronoiarea")
print 'thresh', thresh, 'gkern',gkern, 'clsize', clsize, 'channel', channel, 'rng', rng, 'spacing', spacing, 'mask', maskfile
return allblobs
class imStack(movie):
def __init__(self,fname):
self.typ="3D stack"
self.bg=False
self.fname=fname
spex=os.path.splitext(os.path.basename(fname))
search=re.sub('[0-9]',"?",spex[0])
self.stack=sorted(glob(os.path.dirname(fname)+os.sep+search+spex[1]))
test0,test1=self.stack[:2]
while test0!=test1: test0,test1=test0[:-1],test1[:-1]
while test0[-1] in '0123456789': test0=test0[:-1]
self.datadir=test0+'-data'+sep
try:
im=cv2.imread(self.stack[0],1)
shape=im.shape[:2]
framerate=-1
frames=len(self.stack)
framelim=(0,frames)
except:
shape=(0,0)
framerate=0.
frames=0.
framelim=(0,1e8)
self.parameters={
'framerate':framerate, 'sphericity':-1.,'xscale':1.0,'yscale':1.0,'zscale':1.0,#floats
            'blobsize':(0,30),'imsize':shape,'crop':[0,0,shape[0],shape[1]], 'framelim':framelim,'circle':[shape[0]/2, shape[1]/2, int(np.sqrt(shape[0]**2+shape[1]**2))],#tuples
'channel':0, 'blur':1,'spacing':1,'struct':1,'threshold':128, 'frames':frames, 'imgspacing':-1,'maxdist':-1,'lossmargin':10, 'lenlim':1,#ints
'sizepreview':True, 'invert':False, 'diskfit':False, 'mask':True
}
def getFrame(self,framenum):
"""Retrieves frame of number framenum from opened stack. Returns numpy array image, or False if unsuccessful."""
try:
image=cv2.imread(self.stack[framenum],1)
return image
except:
return False
def extractCoords(self,framelim=False, blobsize=False, threshold=False, kernel=False, delete=True, mask=False, channel=False, sphericity=-1, diskfit=True, blur=1,invert=True,crop=False, contours=False): #fix the argument list! it's a total disgrace...
tInit=time()
contdict={}
if not framelim: framelim=self.parameters['framelim']
if not blobsize: blobsize=self.parameters['blobsize']
if not threshold: threshold=self.parameters['threshold']
if not channel: channel=self.parameters['channel']
if not crop: crop=self.parameters['crop']
if type(kernel).__name__!='ndarray': kernel=np.array([1]).astype(np.uint8)
if not exists(self.datadir):
os.mkdir(self.datadir)
if delete:
try: os.remove(self.datadir+'coords.txt')
except: pass
dumpfile=open(self.datadir+'coords.txt','a')
dumpfile.write('#frame particle# blobsize x y split_blob? [reserved] sphericity\n')
allblobs=np.array([]).reshape(0,8)
counter=0
for i in range(len(self.stack)):
if i%200==0:
print 'frame',i, 'time', str(timedelta(seconds=time()-tInit)), '# particles', counter #progress marker
np.savetxt(dumpfile,allblobs,fmt="%.2f")
allblobs=np.array([]).reshape(0,8)
image=self.getFrame(i)
if type(image).__name__=='ndarray':
if image.shape[:2]!=(crop[2]-crop[0],crop[3]-crop[1]):
if len(image.shape)==2: image=image[crop[0]:crop[2],crop[1]:crop[3]]
if len(image.shape)==3: image=image[crop[0]:crop[2],crop[1]:crop[3],:]
if len(image.shape)>2:
image=image[:,:,channel].astype(float)
#image=mxContr(image) #TODO: this might be a few rescalings too many. try to make this simpler, but make it work first
thresh=mxContr((image<threshold).astype(int))
if type(kernel).__name__=='ndarray': thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
if invert: thresh=255-thresh
if np.amin(thresh)!=np.amax(thresh):
if contours: blobs,conts=extract_blobs(thresh,i,sphericity=sphericity,blobsize=blobsize,diskfit=diskfit, returnCont=True)
else: blobs,conts=extract_blobs(thresh,i,sphericity=sphericity,blobsize=blobsize,diskfit=diskfit, returnCont=False),[]
else: blobs,conts=np.array([]).reshape(0,8),[]
counter=blobs.shape[0]
try: allblobs=np.vstack((allblobs,blobs))
except ValueError:
pass
#print "Value Error!", allblobs.shape, blobs.shape
                for k in range(len(conts)): contdict["%d-%d"%(blobs[0,0],k)]=conts[k]
np.savetxt(dumpfile,allblobs,fmt="%.2f")
dumpfile.close()
with open(self.datadir+'coords.txt','r') as f: tempdata=f.read()[:-1]
with open(self.datadir+'coords.txt','w') as f: f.write(tempdata)
if len(contdict)>0:
with open(self.datadir+'contours.pkl','wb') as f:
cPickle.dump(contdict,f,cPickle.HIGHEST_PROTOCOL)
def blenderPrep(self, nfacets=10, smoothlen=5):
self.loadTrajectories()
if len(self.trajectories)>0 and os.path.isfile(self.datadir+'contours.pkl'):
with open(self.datadir+'contours.pkl', 'rb') as f:
conts=cPickle.load(f)
todelete=glob(self.datadir+'pointfile*.txt')+glob(self.datadir+'vertfile*.txt')
for fname in todelete: os.remove(fname)
for j in self.trajectories.keys():
t1=self.trajectories[j]
if len(t1.data.shape)==2:
keys=[r[0].replace('.','-') for r in t1.data[:,:1].astype(str)]
with open(self.datadir+'pointfile%03d.txt'%t1.number, 'a') as pointfile:
data=conts[keys[0]].flatten().reshape(-1,2)
pointfile.write('%.2f %.2f %.2f \n'%(np.mean(data[:,0]),np.mean(data[:,1]),float(keys[0].split('-')[0])))
for i in range(len(keys)):
zvals=np.array([int(keys[i].split('-')[0])]*nfacets)
data=conts[keys[i]].flatten().reshape(-1,2)
x,y=data[:,0],data[:,1]
xnew,ynew=smooth(x,smoothlen),smooth(y,smoothlen)
inds=list(np.linspace(0,len(xnew)-1,nfacets).astype(int))
np.savetxt(pointfile,np.vstack((xnew[inds],ynew[inds],zvals)).T, fmt='%.2f')
pointfile.write('%.2f %.2f %.2f'%(np.mean(x),np.mean(y),zvals[0]))
verts=[[0,i-1,i,-1] for i in range(2,nfacets+1)]+[[0,nfacets,1,-1]]
verts+=[[(j-1)*nfacets+k-1,(j-1)*nfacets+k,j*nfacets+k,j*nfacets+k-1] for j in range(1,len(keys)) for k in range(2,nfacets+1)]
verts+=[[(j-1)*nfacets+nfacets, (j-1)*nfacets+1, j*nfacets+1, j*nfacets+nfacets] for j in range(1,len(keys))]
verts+=[[nfacets*len(keys)+1,nfacets*len(keys)-i-1,nfacets*len(keys)-i,-1] for i in range(nfacets-1)]+[[nfacets*len(keys)+1,nfacets*len(keys), nfacets*(len(keys)-1)+1,-1]]
np.savetxt(self.datadir+'vertfile%03d.txt'%t1.number,np.array(verts),fmt="%d")
class nomovie(movie):
def __init__(self,datadir):
self.typ="none"
self.bg=False
self.datadir=datadir
self.parameters={
'framerate':-1, 'sphericity':-1.,'xscale':1.0,'yscale':1.0,'zscale':1.0,#floats
'blobsize':(0,30),'imsize':(1920,1080),'crop':[0,0,1920,1080], 'framelim':1e9,'circle':(1000, 500,500),#tuples
'channel':0, 'blur':1,'spacing':1,'struct':1,'threshold':128, 'frames':1e9, 'imgspacing':-1,'maxdist':-1,'lossmargin':10, 'lenlim':1,#ints
'sizepreview':True, 'invert':False, 'diskfit':False, 'mask':True
}
def getFrames(self,position, length=1, channel=0,spacing=1, fname=''):
if fname=='': fname=self.datadir+'bg.png'
im=np.array(Image.open(fname))
if len(im.shape)==3: im=im[:,:,channel]
images=np.dstack([im]*length)
return images
def extract_blobs(bwImg, framenum, blobsize=(0,1e5), sphericity=-1, diskfit=True, outpSpac=200,returnCont=False, spherthresh=1e5):
contours, hierarchy=cv2.findContours(bwImg,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
blobs=[]
if returnCont: listcont=[]
for i in range(len(contours)):
cont=contours[i]
M=cv2.moments(cont)
(xm,ym),rm=cv2.minEnclosingCircle(cont)
area= M['m00'] #adapt for aggregates
marea=np.pi*rm**2
if area>0: spher=marea/area
else:spher=0
if sphericity>=1 and area>blobsize[0] and spher<spherthresh:
if spher>sphericity:
hull = cv2.convexHull(cont,returnPoints = False)
try:
defects = cv2.convexityDefects(cont,hull)
farpts=[]
for j in range(defects.shape[0]):
s,e,f,d = defects[j,0]
if d>500: farpts+=[f]
defectflag=True
                except (AttributeError, IndexError):
#no proper defects? just try to fit something into this contour
if blobsize[0]<area<blobsize[1]:
if diskfit: blobs=blobs+[[framenum,0,marea,xm, ym, 0,-1, spher]]
else: blobs=blobs+[[framenum,0,area,M['m10']/M['m00'], M['m01']/M['m00'], 0,-1,spher]]
if returnCont: listcont+=[cont]
defectflag=False
if defectflag:
farpts=np.array(farpts)
farpts.sort()
if len(farpts)>1:
cts=[]
for j in range(len(farpts)-1):
x=[cont[p][0][0] for p in range(farpts[j],farpts[j+1]+1)]
y=[cont[p][0][1] for p in range(farpts[j],farpts[j+1]+1)]
try:
xc,yc,r,res=leastsq_circle(x,y)
cts+=[[xc,yc,np.pi*r**2]]
except: pass
x=[cont[p][0][0] for p in range(farpts[-1], len(cont))]+[cont[p][0][0] for p in range(farpts[0])]
y=[cont[p][0][1] for p in range(farpts[-1], len(cont))]+[cont[p][0][1] for p in range(farpts[0])]
try:
xc,yc,r,res=leastsq_circle(x,y)
cts+=[[xc,yc,np.pi*r**2]]
except: pass
cts=np.array(cts)
inds=np.arange(len(cts[:,0]))
newcts=[]
while len(inds)>0:
this=inds[0]
try:
inds=inds[1:]
if blobsize[0]<cts[this,2]<blobsize[1]:
newcts+=[cts[this]]
distAr=np.pi*((cts[this,0]-cts[inds,0])**2+(cts[this,1]-cts[inds,1])**2)
inds=inds[(distAr-.5*cts[this,2])>0]
except IndexError: break
for circle in newcts:
blobs=blobs+[[framenum,0,circle[2],circle[0], circle[1], 1,-1,spher]]
if returnCont: listcont+=[cont]
elif area<blobsize[1]:
if diskfit:
blobs=blobs+[[framenum,0,marea,xm, ym, 0,-1,spher]]
if returnCont: listcont+=[cont]
else:
x,y=M['m10']/M['m00'],M['m01']/M['m00']
blobs=blobs+[[framenum,0,area,x,y, 0,-1,spher]]
if returnCont: listcont+=[cont]
elif blobsize[0]<area<blobsize[1] and spher<spherthresh:
if diskfit:
blobs=blobs+[[framenum,0,marea,xm, ym, 0, -1,spher]]
if returnCont: listcont+=[cont]
else:
x,y=M['m10']/M['m00'],M['m01']/M['m00']
blobs=blobs+[[framenum,0,area,x,y, 0,-1,spher]]
if returnCont: listcont+=[cont]
blobs=np.array(blobs)
if len(blobs)>0:
try: blobs[:,1]=np.arange(blobs.shape[0])
except IndexError:
print blobs
if framenum%outpSpac==0: print "# blobs: ", len(blobs)
if returnCont:
return blobs,listcont
else: return blobs
def msqd(data,length):
data=data[~np.isnan(data).any(1)]
return np.array([np.mean(np.sum((data-np.roll(data,i, axis=0))[i:]**2,axis=1)) for i in range(length)])
def mxContr(data):
mx,mn=np.float(np.amax(data)),np.float(np.amin(data))
if mx!=mn:
return (255*(np.array(data)-mn)/(mx-mn)).astype(np.uint8)
else:
print 'Warning, monochrome image!'
return 0.*np.array(data).astype(np.uint8)
def stitchMovies(mlist, outname=None, decim=10,scale=1, crop=[0,0,0,0], frate=24,channel=-1, invert=False, ims=False, rotation=0):
"""crop values: [left, bottom, right, top]"""
if not outname: outname=mlist[0]+'-stitched.avi'
mov=cv2.VideoCapture(mlist[0])
success,image=mov.read()
if success:
for i in range(4): crop[i]=scale*16*(int(crop[i])/(scale*16))
sh=image.shape
nsize=(sh[1]-crop[0]-crop[2])/scale,(sh[0]-crop[1]-crop[3])/scale
bds=[crop[3],sh[0]-crop[1],crop[0],sh[1]-crop[2]]
print sh,nsize,crop
out=cv2.VideoWriter(outname,cv2.cv.CV_FOURCC('D','I','V','X'),frate,(nsize[0],nsize[1]))
mov.release()
for movie in mlist:
mov=cv2.VideoCapture(movie)
success=True
count=0.
while success:
success,image=mov.read()
if count%decim==0 and not isinstance(image, types.NoneType):
if invert: image=255-image
if rotation!=0:
M = cv2.getRotationMatrix2D((image.shape[1]/2, image.shape[0]/2), rotation, 1.)
image= cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
if channel==-1: image=image[bds[0]:bds[1],bds[2]:bds[3],:]
else: image=cv2.cvtColor(image[bds[0]:bds[1],bds[2]:bds[3],channel],cv2.cv.CV_GRAY2RGB)
outim=imresize(image,1./scale)
out.write(outim)
count+=1
if count%50==0: print count,outim.shape
mov.release()
def calc_R(x,y, xc, yc):
""" calculate the distance of each 2D points from the center (xc, yc) """
return np.sqrt((x-xc)**2 + (y-yc)**2)
def rfunc(c, x, y):
""" calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc) """
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def leastsq_circle(x,y):
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, ier = optimize.leastsq(rfunc, center_estimate, args=(x,y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R)**2)
return xc, yc, R, residu
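# Illustrative sketch (added, not part of the original module): recovering the centre and
# radius of a synthetic noisy circle with leastsq_circle(). All values are made up.
#
# phi = np.linspace(0, 2*np.pi, 50)
# x = 100. + 20.*np.cos(phi) + stats.norm.rvs(size=50, scale=0.5)
# y = 200. + 20.*np.sin(phi) + stats.norm.rvs(size=50, scale=0.5)
# xc, yc, R, residual = leastsq_circle(x, y)   # expect xc ~ 100, yc ~ 200, R ~ 20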
def lin_traj(x,y):
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
mx=np.mean((x-np.roll(x,1))[1:])
return np.array([x[-1]+mx, y[-1]+slope*mx])
#http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
def PolygonArea(corners):
n = len(corners) # of corners
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
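# Worked example (added for illustration): the shoelace formula applied to a unit square.
# PolygonArea([(0, 0), (1, 0), (1, 1), (0, 1)])   # returns 1.0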
#http://stackoverflow.com/questions/21732123/convert-true-false-value-read-from-file-to-boolean
def str_to_bool(s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError("Cannot convert {} to bool".format(s))
#scipy cookbook http://wiki.scipy.org/Cookbook/SignalSmooth
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
    t=np.linspace(-2,2,50)
    x=np.sin(t)+np.random.randn(len(t))*0.1
    y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) = length(input) + window_len - 1; to trim back to the input length, return y[(window_len//2):-(window_len//2)] (for odd window_len) instead of just y.
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
        w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
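# Illustrative sketch (added, not in the original): smoothing a noisy sine and trimming
# the padded output back to the input length; len(smooth(x, w)) is len(x)+w-1, so for an
# odd window dropping w//2 samples from each end restores the original length.
#
# t = np.linspace(0, 4*np.pi, 200)
# noisy = np.sin(t) + 0.2*stats.norm.rvs(size=200)
# sm = smooth(noisy, window_len=11)[5:-5]   # len(sm) == 200 again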
def circle_invert(pt, cr, integ=True):
"""Inverts point (inside) at circle cirumference. Used to create mirror clusters for Voronoi construction.
arguments point: (x,y), circle: (xc,yc,r). returns (x,y) as float"""
d=np.sqrt((pt[0]-cr[0])**2+(pt[1]-cr[1])**2) #distance to centre
scf=2*cr[2]/d-1 #scaling factor
newpt=[cr[0]+(pt[0]-cr[0])*scf, cr[1]+(pt[1]-cr[1])*scf]
if integ: newpt=[int(p) for p in newpt]
return newpt
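# Worked example (added for illustration): a point 20 px inside the circumference of the
# circle (xc=50, yc=50, r=30) is mirrored to a point 20 px outside it.
# circle_invert((60, 50), (50, 50, 30), integ=True)   # returns [100, 50]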
def randwalk(lgth,stiff,start=[0.,0.], step=1.,adrift=0.,anoise=.2, dist="const"):
"""generates a 2d random walk with variable angular correlation and optional constant (+noise) angular drift.
Arguments: walk length "lgth", stiffness factor "stiff" - consecutive orientations are derived by adding stiff*(random number in [-1,1]).
Parameters "adrift" and "anoise" add a constant drift angle adrift modulated by a noise factor adrift*(random number in [-1,1]).
The dist parameter accepts non-constant step length distributions (right now, only cauchy/gaussian distributed random variables)"""
rw=[list(start)] #user provided initial coordinates: make (reasonably) sure it's a nested list type.
ang=[0]
#step lengths are precalculated for each step to account for
if dist=="cauchy": steps=stats.cauchy.rvs(size=lgth)
elif dist=="norm": steps=stats.norm.rvs(size=lgth)
else: steps=np.array([step]*lgth) #Overkill for constant step length ;)
#first generate angular progression via cumulative sum of increments (random/stiff + drift terms)
angs=np.cumsum(stiff*stats.uniform.rvs(size=lgth,loc=-1,scale=2)+adrift*(1.+anoise*stats.uniform.rvs(size=lgth,loc=-1,scale=2)))
#x/y trace via steplength and angle for each step, some array reshuffling (2d conversion and transposition)
rw=np.concatenate([np.concatenate([np.array(start[:1]),np.cumsum(steps*np.sin(angs))+start[0]]),np.concatenate([np.array(start)[1:],np.cumsum(steps*np.cos(angs))+start[1]])]).reshape(2,-1).transpose()
return rw, angs
def rw3d(lgth,stiff,start=[0.,0.,0.], step=1.,adrift=0.,anoise=.2, dist="const"):
rw=[list(start)] #user provided initial coordinates: make (reasonably) sure it's a nested list type.
ang=[0]
#step lengths are precalculated for each step to account for
if dist=="cauchy": steps=stats.cauchy.rvs(size=lgth)
elif dist=="norm": steps=stats.norm.rvs(size=lgth)
else: steps=np.array([step]*lgth) #Overkill for constant step length ;)
#first generate angular progression via cumulative sum of increments (random/stiff + drift terms)
thetas=np.cumsum(stiff*stats.uniform.rvs(size=lgth,loc=-1,scale=2)+adrift*(1.+anoise*stats.uniform.rvs(size=lgth,loc=-1,scale=2)))
phis=np.cumsum(stiff*stats.uniform.rvs(size=lgth,loc=-1,scale=2)+adrift*(1.+anoise*stats.uniform.rvs(size=lgth,loc=-1,scale=2)))
#x/y trace via steplength and angle for each step, some array reshuffling (2d conversion and transposition)
rw=np.concatenate([#
np.concatenate([np.array(start[:1]),np.cumsum(steps*np.sin(thetas)*np.sin(phis))+start[0]]),#
np.concatenate([np.array(start)[1:2],np.cumsum(steps*np.sin(thetas)*np.cos(phis))+start[1]]),#
np.concatenate([np.array(start)[2:],np.cumsum(steps*np.cos(thetas))+start[2]])
]).reshape(3,-1).transpose()
return rw, thetas, phis
def scp(a1,a2,b1,b2):
"""returns the cosine between two vectors a and b via the normalised dot product"""
return (a1*b1+a2*b2)/(np.sqrt(a1**2+a2**2)*np.sqrt(b1**2+b2**2))
def correl(d,tmax):
"""Cosine correlation function: """
c=[]
for dT in range(tmax):
c+=[np.mean(np.array([scp(d[j,0]-d[j-1,0], d[j,1]-d[j-1,1], d[j+dT,0]-d[j+dT-1,0], d[j+dT,1]-d[j+dT-1,1]) for j in range(1,len(d[:,0])-tmax)]))]
return c
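# Illustrative sketch (added, not part of the original module): generating a persistent
# random walk and feeding it to the analysis helpers defined above. Parameters are made up.
#
# walk, angles = randwalk(1000, 0.1, step=1.)   # fairly stiff 2D walk, shape (1001, 2)
# msd = msqd(walk, 100)                         # mean squared displacement vs lag time
# cosc = correl(walk, 100)                      # orientational cosine correlation
# pl.plot(msd); pl.figure(); pl.plot(cosc); pl.show()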
|
cmaass/swimmertracking
|
readtraces.py
|
Python
|
gpl-2.0
| 69,835
|
[
"Gaussian"
] |
9b228fe964667f9443e10bfb4f689b750b3421fb8d8c0a80d8be96cd345ded02
|
# Code from Chapter 2 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
# Plots a 1D Gaussian function
import pylab as pl
import numpy as np
gaussian = lambda x: 1/(np.sqrt(2*np.pi)*1.5)*np.exp(-(x-0)**2/(2*(1.5**2)))
x = np.arange(-5,5,0.01)
y = gaussian(x)
pl.ion()
pl.plot(x,y,'k',linewidth=3)
pl.xlabel('x')
pl.ylabel('y(x)')
pl.axis([-5,5,0,0.3])
pl.title('Gaussian Function (mean 0, standard deviation 1.5)')
pl.show()
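# Illustrative check (added, not in the original script): y(x) is a normalised Gaussian
# pdf with sigma=1.5, so its numerical integral over this range should be close to 1.
# print(np.trapz(y, x))   # ~0.999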
|
Anderson-Lab/anderson-lab.github.io
|
csc_466_2021_spring/MLCode/Ch2/plotGaussian.py
|
Python
|
mit
| 730
|
[
"Gaussian"
] |
bd0bfd057912d09eca113219d220370c98747c3886c6753e73b83894a576ee6c
|
#! /usr/bin/env python
"""
list replicas for files in the FileCatalog
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
from COMDIRAC.Interfaces import critical, error
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import DCatalog
from COMDIRAC.Interfaces import pathFromArgument
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from COMDIRAC.Interfaces import ConfigCache
Script.setUsageMessage(
"\n".join(
[
__doc__.split("\n")[1],
"Usage:",
" %s lfn..." % Script.scriptName,
"Arguments:",
" lfn: logical file name",
]
)
)
configCache = ConfigCache()
Script.parseCommandLine(ignoreErrors=True)
configCache.cacheConfig()
args = Script.getPositionalArgs()
session = DSession()
catalog = DCatalog()
if len(args) < 1:
error("No argument provided\n%s:" % Script.scriptName)
Script.showHelp()
DIRAC.exit(-1)
exitCode = 0
for arg in args:
# lfn
        lfn = pathFromArgument(session, arg)
# fccli.do_replicas( lfn )
ret = returnSingleResult(catalog.catalog.getReplicas(lfn))
if ret["OK"]:
replicas = ret["Value"]
print(lfn + ":")
for se, path in replicas.items():
print(" ", se, path)
else:
error(lfn + ": " + ret["Message"])
exitCode = -2
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
DIRACGrid/COMDIRAC
|
src/COMDIRAC/Interfaces/scripts/dreplicas.py
|
Python
|
gpl-3.0
| 1,753
|
[
"DIRAC"
] |
1df41d27b5ce3a660533054e0ced77068d87328c25da580b73e86e9e6df593d2
|
#
#
# File to test current configuration of GranuleCell project.
#
# To execute this type of file, type '..\..\..\nC.bat -python XXX.py' (Windows)
# or '../../../nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
# NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc # Many useful functions such as SimManager.runMultipleSims found here
projFile = File("../VSCSGranCell.neuro.xml")
############## Main settings ##################
simConfigs = []
#simConfigs.append("Default Simulation Configuration")
simConfigs.append("CellsPulse500ms")
simDt = 0.001
simulators = ["NEURON"]
varTimestepNeuron = True
varTimestepTolerance = 0.00001
plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = True
#############################################
def testAll(argv=None):
if argv is None:
argv = sys.argv
print "Loading project from "+ projFile.getCanonicalPath()
simManager = nc.SimulationManager(projFile,
verbose = verbose)
simManager.runMultipleSims(simConfigs = simConfigs,
simDt = simDt,
simulators = simulators,
runInBackground = runInBackground,
varTimestepNeuron = varTimestepNeuron,
varTimestepTolerance = varTimestepTolerance)
simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
analyseSims = analyseSims)
# These were discovered using analyseSims = True above.
# They need to hold for all simulators
spikeTimesToCheck = {'AllChansCG_mod_0': [168.2, 219.7, 270.5, 320.2, 369.3, 417.9, 466.1, 514.1, 561.9]}
spikeTimeAccuracy = 0.1
report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
spikeTimeAccuracy = spikeTimeAccuracy)
print report
return report
if __name__ == "__main__":
testAll()
|
pgleeson/TestArea
|
models/VSCSGranCell/pythonScripts/RunTests.py
|
Python
|
gpl-2.0
| 2,810
|
[
"NEURON"
] |
ea7abec5ad49b1b63b3da826cf2cb5e801410971a619e81936533222523e9cff
|
# -*- coding: utf-8 -*-
{
"'Cancel' will indicate an asset log entry did not occur": "' 취소 ' 자산 로그 항목을 표시합니다 발생하지 않았습니다.",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "위치 이 지역의 지리적 영역을 지정합니다. 이 위치 계층 위치일 수 있습니다, ' group ', 또는 해당 영역의 경계에 있는 위치.",
"Acronym of the organization's name, eg. IFRC.": 'acronym 조직의 이름, 예 ifrc.',
"Authenticate system's Twitter account": '인증할 사용자의 시스템 twitter 계정',
"Can't import tweepy": 'tweepy를 가져올 수 없습니다',
"Caution: doesn't respect the framework rules!": '주의: 프레임워크는 규칙에 대해 않습니다!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "형식 속성 값 및 rgb 값을 사용하십시오 이러한 json 오브젝트 (예: {빨간색 목록으로 '#FF0000 ', 초록색으로 '#00FF00 ', yellow: '#FFFF00 '}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": '선택한 경우, 이 자산의 위치를 사용자의 위치 갱신될 때마다 갱신됩니다.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": '이 구성은 region 메뉴의 영역을 나타내는 경우, 이름 메뉴에서 사용할 수 있습니다. 개인용 맵 구성에 대한 이름은 사용자의 이름으로 설정됩니다.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": '이 필드를 채우지 않으면 다음 이 조직 최대 서명할 때 지정하는 사용자 도메인 필드에 도메인 일치하지 않는 한 이 조직의 직원은 지정됩니다.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": '이 체크 인 경우 이 사용자의 기본 위치 및 따라서 사용자가 맵에 표시됩니다 됩니다',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "목록에 있는 병원 보이지 않는 경우, 링크 병원 ' 추가 ' 를 눌러 새로 추가할 수 있습니다.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "목록에 있는 사무실 보이지 않는 경우 링크 ' 부재중 ' 추가를 클릭하여 새로 추가할 수 있습니다.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "목록에서 조직 표시되지 않으면, 링크 추가 ' 조직 ' 을 클릭하여 새 추가할 수 있습니다.",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": '대신 자동으로 네트워크를 통해 다른 피어에서 동기화, 파일, 필요한 곳에 네트워크 의 경우 동기화 수. 이 페이지에서 파일 동기화 데이터 반입 및 데이터 파일을 sync 내보낼 수 있습니다. 이 페이지로 이동할 수 있는 링크를 누르십시오.',
"Level is higher than parent's": '상위 레벨이 아닌 경우',
"Need a 'url' argument!": "' url ' 인수가 필요합니다!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "선택사항입니다. geometry 컬럼의 이름. postgis 의 기본값은 ' the_geom'. 수",
"Parent level should be higher than this record's level. Parent level is": '상위 레벨 이상 이 레코드 레벨 이상이어야 합니다. 상위 레벨',
"Password fields don't match": '암호 필드가 일치하지 않습니다.',
"Phone number to donate to this organization's relief efforts.": '전화 번호 이 조직의 릴리프 위해 노력을 donate.',
"Please come back after sometime if that doesn't help.": '해당 되는 경우 후 sometime.',
"Quantity in %s's Inventory": '수량% s 명세에',
"Select a Room from the list or click 'Create Room'": "목록에서 미팅룸을 선택하십시오 ' 추가하십시오 미팅룸 '",
"Select a person in charge for status 'assigned'": "지정된 ' 상태 ' 에 대한 사용자 선택",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "이 경우 모든 특정 위치는 위치 계층 의 최상위 레벨 상위 선택하십시오. 예를 들어, ' 특별지방자치단체 ' 계층 작은 디비전으로, 모든 특정 위치를 특별지방자치단체 상위로 않아도 됩니다.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "이 경우 모든 특정 위치는 위치 계층 구조의 상위 위치를 선택하십시오. 영향받는 이 영역을 표시하는 ' region ' 설정하는 데 도움이 됩니다.",
"Sorry, things didn't get done on time.": '죄송합니다, 같은 시간에 수행되지 않았습니다.',
"Sorry, we couldn't find that page.": '죄송합니다. 이 페이지를 찾을 수 없습니다.',
"System's Twitter account updated": '시스템 twitter 갱신된 계정',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "(doner (s) 이 프로젝트. 다중 값 ' 제어 ' 키를 선택할 수 있습니다.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '이미지 파일의 url. 이미지 파일 업로드 사용하지 않을 경우, 해당 위치를 지정해야 합니다.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "사용자 이름으로 검색하려면, 첫 번째, 중간 또는 마지막 이름을 입력하십시오, 공백으로 구분됩니다. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 개인을 나열하십시오.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "본문에 대한 검색, 본문의 id 태그를 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 본체를 모두 나열하십시오.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "병원 에 대한 검색, 병원 의 이름 또는 id 중 하나를 입력하거나, 조직 이름 또는 약어, 공백으로 구분됩니다. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 병원 나열하십시오.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "병원 에 대한 검색, 병원 의 이름 또는 id 를 공백으로 구분됩니다. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 병원 나열하십시오.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "위치를 검색하려면 이름을 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 위치 목록에 없습니다.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "사용자를 검색할 수 있는 경우, 첫 번째, 중간 또는 마지막 이름 및/또는 개인 id 번호 중 하나를 공백으로 구분하여 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 개인을 나열하십시오.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "평가에 대한 검색, 어떤 부분이 평가의 티켓 번호를 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 평가를 나열하십시오.",
"Type the first few characters of one of the Person's names.": '이름 중 첫 몇글자를 입력하세요.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '이미지 파일을 업로드하십시오. 이미지 파일 업로드 경우, url 필드에 해당 위치를 지정해야 합니다.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '다른 때 데이터 동기화 충돌이 있는 경우 두 (또는 그 이상) 자가 이를 모두 수정, 즉 충돌 정보를 정보를 동기화할 때 발생합니다. 동기화 모듈은 충돌을 자동으로 해결할 수 시도하나 일부 경우에 이를 수 없습니다. 이러한 경우, 것은 사용자에게 이러한 충돌을 수동으로 해결할 수 없는 경우, 이 페이지로 이동할 수 있는 링크를 누르십시오.',
"You haven't made any calculations": '임의의 계산을 수행한 없음',
"couldn't be parsed so NetworkLinks not followed.": 'networklinks. 그 때문에 구문 분석할 수 없습니다.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": '이 openlayers 아직 지원되지 않은 groundoverlay 또는 screenoverlay, 따라서 제대로 작동하지 않을 수 있습니다.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '\\ " update\\ " \\ " field1=\'newvalue\'\\ " 와 같은 선택적 표현식입니다. 갱신하거나 조인의 결과를 삭제할 수 없습니다.',
'# of International Staff': '# 국제 직원',
'# of National Staff': '# 자국 인력을',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nif 요청 유형 "%(type)s", %(type)s 를 다음 화면에서 입력하십시오.',
'%(system_name)s - Verify Email': '%(system_name)s - 확인할 메일',
'%s rows deleted': '% s 행 삭제',
'%s rows updated': '% s 행',
'& then click on the map below to adjust the Lat/Lon fields': '& 맵핑 아래 lat/₩ 조정하려면 필드를 누르십시오',
'* Required Fields': '* 필수 필드',
'0-15 minutes': '0-15 분',
'1 Assessment': '1 평가',
'1 location, shorter time, can contain multiple Tasks': '1 위치, 짧은 시간에 여러 태스크를 포함할 수 있습니다',
'1-3 days': '1-3일',
'15-30 minutes': '15-30분',
'2 different options are provided here currently:': '2 다른 옵션을 현재 제공됩니다.',
'2x4 Car': '자동차 2x4',
'30-60 minutes': '30-60 분',
'4-7 days': '4-7 일',
'4x4 Car': '자동차 4x4',
'8-14 days': '8-14 일',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '개별 위치에 지정된 마커를 경우 필요한 기능을 클래스에 지정된 마커를 대체하기 위해 설정됩니다.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': '참조 문서 같은 파일로, url 또는 이 데이터 검증하십시오. 문서 1 의 이름 몇 자를 기존 문서를 링크할 수 있습니다.',
'A brief description of the group (optional)': '간략한 설명 (선택적)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'xml 파일 형식으로 지리적 위치 일련의 포함하는 gps 다운로드됩니다.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': '이 파일은 시간소인이 사진이 있는 시간소인이 있는 맵에서 찾을 상관될 수 있는 gps gpx 형식으로 가져옵니다.',
'A library of digital resources, such as photos, documents and reports': '라이브러리 자원 (예: 디지털 사진, 문서 및 보고서',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': '하나의 관리 region 내에 포함되지 않은 경우 위치 그룹 영향받는 영역의 범위를 정의하는 데 사용할 수 있습니다.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': '위치 그룹 위치 세트 (종종 결합된 영역을 나타내는 관리 region 세트입니다).',
'A location group must have at least one member.': '위치 그룹에는 적어도 하나의 구성원이 있어야 합니다.',
'ABOUT THIS MODULE': '모듈 정보',
'ACCESS DATA': '액세스 데이터',
'ANY': '모두',
'API is documented here': 'api 여기에 설명되어 있습니다',
'ATC-20 Rapid Evaluation modified for New Zealand': 'atc-20 빠른 평가 뉴질랜드 수정된',
'Abbreviation': '약어',
'Ability to Fill Out Surveys': '체크아웃 기능을 서베이를 채울 수',
'Ability to customize the list of details tracked at a Shelter': '기능 shelter 추적되는 상세 목록을 사용자 정의할 수',
'Ability to customize the list of human resource tracked at a Shelter': '기능 shelter 추적되는 인적 자원 목록을 사용자 정의할 수',
'Ability to customize the list of important facilities needed at a Shelter': '기능은 shelter 필요한 중요한 기능의 목록을 사용자 수',
'Ability to view Results of Completed and/or partially filled out Surveys': '기능 완료 및/조사 결과 보기 또는 부분적으로 채워진 체크아웃할 수',
'About': '제품 정보',
'Access denied': '액세스 거부됨',
'Access to Shelter': 'shelter 에 액세스',
'Access to education services': '액세스 교육 서비스',
'Accessibility of Affected Location': '내게 필요한 옵션 받는 위치',
'Account Registered - Please Check Your Email': '계정 등록-전자 우편 확인',
'Acronym': '약어',
'Actionable by all targeted recipients': '가능한 모든 대상으로 받는',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '실행 가능한 유일한 에서 연습 참여자; id 표시되어야 하는 연습<note>',
'Actioned?': '실행되지?',
'Actions taken as a result of this request.': '이 요청의 결과로 취한 조치.',
'Actions': '조치',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': '해당 자원 (인력, 자산 및 설비) 할당을 시나리오 템플리트 활성화를 이벤트.',
'Active Problems': '활성 문제',
'Activities matching Assessments:': '활동 평가 일치:',
'Activities of boys 13-17yrs before disaster': '활동 boys 재해 전에 13-17yrs',
'Activities of boys 13-17yrs now': '활동 boys 13-17yrs 이제',
'Activities of boys <12yrs before disaster': '활동 boys <12yrs 재해 전에',
'Activities of boys <12yrs now': 'boys <12yrs 활동 이제',
'Activities of children': '하위 활동',
'Activities of girls 13-17yrs before disaster': '활동 girls 13-17yrs 재해 전에',
'Activities of girls 13-17yrs now': 'girls 13-17yrs 활동 이제',
'Activities of girls <12yrs before disaster': '활동 girls <12yrs 재해 전에',
'Activities of girls <12yrs now': 'girls <12yrs 활동 이제',
'Activities': '액티비티',
'Activities:': '활동:',
'Activity Added': '활동 추가',
'Activity Deleted': '활동 삭제',
'Activity Details': '활동 세부사항',
'Activity Report': '활동 보고서',
'Activity Reports': '활동 보고서',
'Activity Type': '활동 유형',
'Activity Updated': '갱신된 활동',
'Activity': '활동',
'Add Address': '주소 추가',
'Add Alternative Item': '대체 항목 추가',
'Add Assessment Summary': '추가 평가 요약',
'Add Assessment': '평가 추가',
'Add Asset Log Entry - Change Label': '로그 항목-자산 변경할 레이블 추가',
'Add Availability': '추가 가용성',
'Add Baseline Type': '기준선 추가 유형',
'Add Baseline': '기준선 추가',
'Add Bundle': '번들 추가',
'Add Camp Service': '자녀를 서비스 추가',
'Add Camp Type': '자녀를 유형 추가',
'Add Camp': '자녀를 추가',
'Add Certificate for Course': 'certicate 코스 추가',
'Add Certification': '인증 추가',
'Add Competency': '능력 추가',
'Add Contact Information': '문의처 정보 추가',
'Add Credential': '권한 정보 추가',
'Add Credentials': '권한 정보 추가',
'Add Disaster Victims': '피해 희생 추가',
'Add Distribution.': '분배 추가.',
'Add Donor': '추가 공여',
'Add Flood Report': '플러드 보고서 추가',
'Add Group Member': '그룹 구성원 추가',
'Add Human Resource': '인적 자원 추가',
'Add Identity': 'id 추가',
'Add Image': '이미지 추가',
'Add Impact Type': '추가 영향 유형',
'Add Impact': '영향 추가',
'Add Item to Catalog': '카탈로그 항목 추가',
'Add Item to Commitment': '때,확약 항목 추가',
'Add Item to Inventory': '재고 항목 추가',
'Add Item to Request': '항목 추가 요청',
'Add Item to Shipment': '운송 항목에 대한 추가',
'Add Item': '항목 추가',
'Add Job Role': '작업 역할 추가',
'Add Key': '키 추가',
'Add Kit': '추가 kit',
'Add Level 1 Assessment': '레벨 1 평가 추가',
'Add Level 2 Assessment': '레벨 2 평가 추가',
'Add Log Entry': '로그 항목 추가',
'Add Member': '회원 추가',
'Add Membership': '멤버십 추가',
'Add Message': '메시지 추가',
'Add Mission': '추가 mission',
'Add Need Type': '필요한 추가 유형',
'Add Need': '필요한 추가',
'Add New Assessment Summary': '새 평가 요약',
'Add New Assessment': '새 평가 추가',
'Add New Baseline Type': '새 기준선 유형 추가',
'Add New Baseline': '새 기준선 추가',
'Add New Budget': '새 예산 추가',
'Add New Bundle': '새 번들 추가',
'Add New Camp Service': '새 자녀를 서비스 추가',
'Add New Camp Type': '새 자녀를 유형 추가',
'Add New Camp': '새 자녀를 추가',
'Add New Cluster Subsector': '새 클러스터 subsector 추가',
'Add New Cluster': '새 클러스터 추가',
'Add New Commitment Item': '추가할 새 항목은 확약',
'Add New Document': '새 문서 추가',
'Add New Donor': '새 제공자 추가',
'Add New Entry': '새 항목 추가',
'Add New Event': '새 이벤트 추가',
'Add New Flood Report': '새 범람 보고서 추가하기',
'Add New Human Resource': '추가할 새 인적 자원',
'Add New Image': '새 이미지 추가하기',
'Add New Impact Type': '새 영향 유형 추가',
'Add New Impact': '새 영향 추가',
'Add New Item to Kit': '새 항목 추가 로 kit',
'Add New Key': '새 키 추가',
'Add New Level 1 Assessment': '새 레벨 1 평가 추가',
'Add New Level 2 Assessment': '새 레벨 2 평가 추가',
'Add New Member': '새 멤버 추가',
'Add New Membership': '새 구성원 추가',
'Add New Need Type': '새 하는 유형 추가',
'Add New Need': '새 추가 합니다',
'Add New Population Statistic': '새 인구 통계 추가',
'Add New Problem': '새 문제점 추가',
'Add New Rapid Assessment': '추가할 새 신속한 평가',
'Add New Received Item': '수신된 새 항목 추가',
'Add New Record': '새 레코드 추가',
'Add New Report': '새 보고서 추가',
'Add New Request Item': '새 품목 요청',
'Add New Request': '새 요청 추가',
'Add New River': '새 river 추가',
'Add New Role to User': '새 역할에 사용자 추가',
'Add New Scenario': '새 시나리오 추가',
'Add New Sent Item': '새 보낸 항목 추가',
'Add New Setting': '새 설정 추가',
'Add New Solution': '새 솔루션 추가',
'Add New Staff Type': '새 직원 유형 추가',
'Add New Subsector': '새 subsector 추가',
'Add New Survey Answer': '새 설문지 응답 추가',
'Add New Survey Question': '새 설문지 질문 추가',
'Add New Survey Series': '새 설문지 시리즈 추가',
'Add New Survey Template': '새 서베이 템플리트 추가',
'Add New Team': '새 팀 추가',
'Add New Ticket': '새 티켓 추가',
'Add New Track': '추가할 새 추적',
'Add New User to Role': '새 사용자 역할 추가',
'Add New': '새로 추가',
'Add Peer': '피어 추가',
'Add Person': '사용자 추가',
'Add Photo': '사진 추가',
'Add Population Statistic': '인구 통계 추가',
'Add Position': '위치 추가',
'Add Problem': '추가 문제점',
'Add Question': '질문 추가',
'Add Rapid Assessment': '빠른 평가 추가',
'Add Record': '레코드 추가',
'Add Report': '보고서 추가',
'Add Request': '요청 추가',
'Add River': '추가 강',
'Add Section': '섹션 추가',
'Add Setting': '설정 추가',
'Add Skill Equivalence': '기술 반복기에 추가',
'Add Skill Provision': '기술 프로비저닝하려면 추가',
'Add Solution': '솔루션 추가',
'Add Staff Type': '추가 직원 유형',
'Add Subscription': '등록 추가',
'Add Subsector': '추가 subsector',
'Add Survey Answer': '서베이 응답 추가',
'Add Survey Question': '서베이 질문 추가',
'Add Survey Series': '추가 조사 시리즈',
'Add Survey Template': '서베이 템플리트 추가',
'Add Team Member': '회원 추가',
'Add Team': '팀 추가',
'Add Ticket': '티켓 추가',
'Add Training': '교육 추가',
'Add Unit': '단위 추가',
'Add Volunteer Availability': '지원자 가용성 추가',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': '같은 파일 참조 문서 추가, url 또는 이 데이터 검증하십시오. 참조 문서 입력하지 않으면, 대신 표시됩니다.',
'Add a Volunteer': '를 자발적으로 추가',
'Add a new certificate to the catalog.': '새 인증 카탈로그에 추가하십시오.',
'Add a new competency rating to the catalog.': '새 능력 등급 카탈로그에 추가하십시오.',
'Add a new course to the catalog.': '새 과정 카탈로그에 추가하십시오.',
'Add a new job role to the catalog.': '카탈로그에 새 작업 역할을 추가하십시오.',
'Add a new skill provision to the catalog.': '새로운 기술 프로비저닝 카탈로그에 추가하십시오.',
'Add a new skill to the catalog.': '새 항목을 추가하려면.',
'Add a new skill type to the catalog.': '새 항목 유형 카탈로그에 추가하십시오.',
'Add new Group': '새 그룹 추가',
'Add new Individual': '새 개별 추가',
'Add new project.': '새 프로젝트를 추가하십시오.',
'Add staff members': '스태프 구성원 추가',
'Add to Bundle': '번들에 추가',
'Add to budget': '에 예산 추가',
'Add volunteers': 'volunteers 추가',
'Add/Edit/Remove Layers': '추가/편집/계층 제거',
'Added to Group': '구성원 추가',
'Added to Team': '구성원 추가',
'Additional Beds / 24hrs': '추가 의료용/24hrs',
'Address Details': '주소 상세정보',
'Address Type': '주소 유형',
'Address added': '주소 추가',
'Address deleted': '주소 삭제',
'Address updated': '주소 갱신',
'Address': '주소',
'Addresses': '주소',
'Adequate food and water available': '적합한 식품 워터마크 사용',
'Adequate': '적절한',
'Admin Email': '관리자 전자 우편',
'Admin Name': 'Admin 이름',
'Administration': '관리',
'Adolescent (12-20)': 'adolescent (12-20)',
'Adolescent participating in coping activities': 'adolescent 활동에 참여하는 복사',
'Adult (21-50)': '성인 (21-50)',
'Adult ICU': '성인 icu',
'Adult Psychiatric': '성인 psychiatric',
'Adult female': '성인 여성',
'Adult male': '성인 남성',
'Adults in prisons': 'adults prisons 에서',
'Advanced:': '고급:',
'Advisory': '보안 권고문',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': '이 단추를 누른 후, 쌍체 항목 세트를 하나씩 표시됩니다. 참고로, 다른 원하는 각 쌍에서 하나의 솔루션을 선택하십시오.',
'Age Group': '연령 그룹',
'Age group does not match actual age.': '연령 그룹 실제 나이 일치하지 않습니다.',
'Age group': '연령 그룹',
'Aggravating factors': 'aggravating 요소',
'Agriculture': '농업',
'Air Transport Service': 'air transport 서비스',
'Aircraft Crash': '항공기 충돌',
'Aircraft Hijacking': '항공기 하이잭이라고',
'Airport Closure': '공항 처리완료',
'Airspace Closure': 'airspace 마감',
'Alcohol': '알코올',
'Alert': '경보',
'All Inbound & Outbound Messages are stored here': '모든 인바운드 및 아웃바운드 메시지를 여기에 저장됩니다',
'All Resources': '모든 자원',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': '이 사이트에서 sahana software foundation 에서 제공하는 모든 데이터를 창의적 commons attribution 라이센스 하에서 licenced. 그러나, 모든 데이터 비롯됩니다. 각 항목의 소스 필드를 참조하십시오.',
'Allowed to push': '누름 수',
'Allows a Budget to be drawn up': '그릴 수 있게 예산',
'Allows authorized users to control which layers are available to the situation map.': '사용자가 허용하는 계층을 사용할 수 있는 상황 맵핑할 제어할 수 있습니다.',
'Alternative Item Details': '대체 항목 세부사항',
'Alternative Item added': '대체 항목 추가됨',
'Alternative Item deleted': '대체 항목 삭제',
'Alternative Item updated': '대체 항목 갱신',
'Alternative Item': '대체 품목',
'Alternative Items': '대체 항목',
'Alternative places for studying': '대체 연구하여 대한 작업공간',
'Ambulance Service': 'ambulance 서비스',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '흡입구 (시스템, 웨어하우스 관리 시스템, 상품 추적, 공급망 관리, 조달 및 기타 자산 및 자원 관리 기능을 제공합니다.',
'An item which can be used in place of another item': '다른 항목 대신 사용할 수 있는 항목',
'Analysis of Completed Surveys': '분석 완료 조사 중',
'Animal Die Off': '동물 off die',
'Animal Feed': '피드 동물',
'Antibiotics available': 'antibiotics 사용',
'Antibiotics needed per 24h': 'antibiotics 24h 당 필요한',
'Apparent Age': '피상 연령',
'Apparent Gender': '피상 성별',
'Application Deadline': '어플리케이션 최종 기한',
'Approve': '승인',
'Approved': '승인된 날짜',
'Approver': '승인자',
'Arctic Outflow': 'arctic outflow',
'Areas inspected': '영역 검사',
'Assessment Details': '평가 세부사항',
'Assessment Reported': '보고된 평가',
'Assessment Summaries': '평가 요약',
'Assessment Summary Details': '평가 요약 세부사항',
'Assessment Summary added': '추가된 평가 요약',
'Assessment Summary deleted': '삭제된 평가 요약',
'Assessment Summary updated': '갱신된 평가 요약',
'Assessment added': '평가 추가',
'Assessment admin level': '평가 관리 레벨',
'Assessment deleted': '평가 삭제',
'Assessment timeline': '시간선 평가',
'Assessment updated': '갱신된 평가',
'Assessment': '평가',
'Assessments Needs vs. Activities': '평가 vs. 하는 활동',
'Assessments and Activities': '평가 및 활동',
'Assessments': '평가',
'Assessments:': '평가:',
'Assessor': '평가자',
'Asset Details': '자산 세부사항',
'Asset Log Details': '자산 세부사항 로그',
'Asset Log Empty': '자산 빈 로그',
'Asset Log Entry Added - Change Label': '로그 항목-자산 변경할 레이블 추가',
'Asset Log Entry deleted': '자산 로그 항목 삭제',
'Asset Log Entry updated': '자산 로그 항목 갱신',
'Asset Log': '자산 로그',
'Asset Management': '자산 관리',
'Asset Number': '자산 번호',
'Asset added': '자산 추가됨',
'Asset deleted': '삭제된 자산',
'Asset removed': '제거된 자산',
'Asset updated': '자산 업데이트됨',
'Asset': '자산',
'Assets are resources which are not consumable but are expected back, so they need tracking.': '자산을 이용 않았으나 예상되는 경우 자원, 트래킹 합니다.',
'Assets': '자산',
'Assign Group': '그룹 지정',
'Assign Staff': '스태프 지정',
'Assign to Org.': '조직 할당하십시오.',
'Assign to Organization': '조직 지정',
'Assign to Person': '사용자 지정',
'Assign to Site': '사이트 지정',
'Assign': '지정',
'Assigned By': '지정한',
'Assigned To': '지정 대상',
'Assigned to Organization': '지정된 조직',
'Assigned to Person': '지정된 사용자',
'Assigned to Site': '지정된 사이트',
'Assigned to': '지정 대상',
'Assigned': '지정됨',
'At/Visited Location (not virtual)': '/방문한 위치 (가상)',
'Attend to information sources as described in <instruction>': '참석 정보가 소스에 에 설명된 대로<instruction>',
'Attribution': 'attribution',
'Author': '작성자',
'Availability': '가용성',
'Available Alternative Inventories': '사용 명세를 대체',
'Available Beds': '사용 가능한 의료용',
'Available Inventories': '사용 가능한 자원',
'Available Messages': '사용 가능한 메시지',
'Available Records': '사용 가능한 레코드',
'Available databases and tables': '데이터베이스 및 테이블 사용',
'Available for Location': '사용 위치',
'Available from': '사용 가능 원본',
'Available in Viewer?': '사용 표시기에서?',
'Available until': '사용 가능한 최종 시간',
'Avalanche': 'avalanche',
'Avoid the subject event as per the <instruction>': '주제 이벤트 대로 당 피하기<instruction>',
'Background Color for Text blocks': '텍스트 블록의 배경 색상',
'Background Color': '배경색',
'Bahai': '바하이',
'Baldness': '탈모',
'Banana': '바나나',
'Bank/micro finance': '은행/마이크로 파이낸스',
'Barricades are needed': 'barricades 필요',
'Base Layer?': '기본 ssl?',
'Base Location': '기본 위치',
'Base Site Set': '기본 사이트 설정',
'Baseline Data': '기준선 데이터',
'Baseline Number of Beds': '기준선 번호 의료용 중',
'Baseline Type Details': '기준선 유형 세부사항',
'Baseline Type added': '기준선 유형 추가',
'Baseline Type deleted': '기준선 유형 삭제',
'Baseline Type updated': '기준선 유형 갱신',
'Baseline Type': '기준선 유형',
'Baseline Types': '기준선 유형',
'Baseline added': '기준선 추가',
'Baseline deleted': '기준선 삭제',
'Baseline number of beds of that type in this unit.': '이 유형의 의료용 기준선 번호.',
'Baseline updated': '기준선 갱신',
'Baselines Details': '기준선 세부사항',
'Baselines': '기준선',
'Basic Assessment Reported': '기본 평가에서 보고된',
'Basic Assessment': '기본 평가',
'Basic Details': '기본 세부사항',
'Basic reports on the Shelter and drill-down by region': '기본, shelter 및 drill-down region 에 대한 보고서',
'Baud rate to use for your modem - The default is safe for most cases': '전송 속도를 사용자 모뎀의-기본 사용할 대부분의 스레드세이프인지',
'Baud': '보오율',
'Beam': '빔',
'Bed Capacity per Unit': 'bed 용량 단위',
'Bed Capacity': 'bed 용량',
'Bed Type': 'bed 유형',
'Bed type already registered': 'bed 유형이 이미 등록되었습니다.',
'Below ground level': '아래 접지선 레벨',
'Beneficiary Type': '수혜자입니다 유형',
'Biological Hazard': '생물학 위험',
'Biscuits': 'biscuits',
'Blizzard': 'blizzard',
'Blood Type (AB0)': '혈액 유형 (AB0)',
'Blowing Snow': 'blowing 눈',
'Boat': 'boat',
'Bodies found': '본문을 찾을 수 없음',
'Bodies recovered': '복구된 단체',
'Body Recovery Request': '본문 복구 요청',
'Body Recovery Requests': '본문 복구 요청',
'Body': 'body',
'Bomb Explosion': '폭발 bomb',
'Bomb Threat': 'bomb 위협',
'Bomb': 'bomb',
'Border Color for Text blocks': '경계 색상 텍스트 블록',
'Brand Details': '브랜드 세부사항',
'Brand added': '브랜드 추가',
'Brand deleted': '브랜드 삭제',
'Brand updated': '갱신된 브랜드',
'Brand': '브랜드',
'Brands': '브랜드',
'Bricks': 'bricks',
'Bridge Closed': '브릿지 닫힘',
'Bucket': '버킷',
'Buddhist': '불교식 달력',
'Budget Details': '예산 세부사항',
'Budget Updated': '갱신된 예산',
'Budget added': '예산 추가',
'Budget deleted': '예산 삭제',
'Budget updated': '갱신된 예산',
'Budget': '예산',
'Budgeting Module': '모듈 예산',
'Budgets': '예산',
'Buffer': '버퍼',
'Bug': '버그',
'Building Assessments': '빌드 평가',
'Building Collapsed': '빌드 접힌',
'Building Name': '빌딩 이름',
'Building Safety Assessments': '빌드 안전 평가',
'Building Short Name/Business Name': '빌드 짧은 이름/비즈니스 이름',
'Building or storey leaning': '빌드 또는 storey leaning',
'Built using the Template agreed by a group of NGOs working together as the': '템플리트 동의된 ngos 그룹에서 함께 사용하여 작업',
'Bulk Uploader': '벌크 uploader',
'Bundle Contents': '번들 컨텐츠',
'Bundle Details': 'Bundle 세부사항',
'Bundle Updated': '번들 갱신',
'Bundle added': '번들 추가',
'Bundle deleted': '번들 삭제',
'Bundle updated': '번들 갱신',
'Bundle': '번들',
'Bundles': '번들',
'Burn ICU': '충전하지 icu',
'Burn': '소모시키다',
'Burned/charred': '하드코드된/charred',
'By Facility': '기능에 의해',
'By Inventory': '자원 명세',
'CBA Women': 'cba 여성',
'CSS file %s not writable - unable to apply theme!': 'css 파일% s not installed — unable 테마를 적용할 쓰기-!',
'Calculate': '계산',
'Camp Coordination/Management': '자녀를 조정/관리',
'Camp Details': '자녀를 세부사항',
'Camp Service Details': '서비스 세부사항 캠프',
'Camp Service added': '자녀를 서비스 추가',
'Camp Service deleted': '자녀를 서비스 삭제',
'Camp Service updated': '자녀를 서비스 갱신',
'Camp Service': '자녀를 서비스',
'Camp Services': '자녀를 서비스',
'Camp Type Details': '자녀를 유형 세부사항',
'Camp Type added': '자녀를 유형 추가',
'Camp Type deleted': '자녀를 유형 삭제',
'Camp Type updated': '자녀를 유형 갱신',
'Camp Type': '자녀를 유형',
'Camp Types and Services': '자녀를 유형 및 서비스',
'Camp Types': '자녀를 유형',
'Camp added': '추가된 캠프',
'Camp deleted': '캠프 삭제됨',
'Camp updated': '갱신된 캠프',
'Camp': '캠프',
'Camps': 'camps',
'Can only disable 1 record at a time!': '한 번에 1 레코드 사용 불가능하게 할 수 있습니다.',
'Cancel Log Entry': '로그 항목 취소',
'Cancel Shipment': '선적 취소',
'Cancel': 'CANCEL(취소)',
'Canceled': '취소됨',
'Candidate Matches for Body %s': '후보자가 신체 %s 에 일치합니다',
'Canned Fish': '통조림에 든 생선',
'Cannot be empty': '비어있을 수 없음',
'Cannot disable your own account!': '사용자 고유 계정을 사용 불가능하게 할 수 없습니다.',
'Capacity (Max Persons)': '용량 (max 명)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'capture 정보 피해 희생 (tourists, passengers, 제품군에서 등) )',
'Capture Information on each disaster victim': 'capture 정보 각 피해 희생 (victim)',
'Capturing the projects each organization is providing and where': '각 프로젝트 제공하는 조직 및 캡처',
'Cardiology': 'cardiology',
'Cassava': 'cassava',
'Casual Labor': '일반 작업',
'Casualties': 'casualties',
'Catalog Details': '카탈로그 세부사항',
'Catalog Item added': '카탈로그 항목 추가',
'Catalog Item deleted': '카탈로그 항목 삭제',
'Catalog Item updated': '카탈로그 항목 갱신',
'Catalog Items': '카탈로그 항목',
'Catalog added': '카탈로그 추가',
'Catalog deleted': '카탈로그 삭제',
'Catalog updated': '카탈로그 갱신',
'Catalog': '카탈로그',
'Catalogs': '카탈로그',
'Categories': '범주',
'Category': '카테고리',
'Ceilings, light fixtures': 'ceilings, 표시등이 fixtures',
'Central point to record details on People': '중앙 사용자 레코드에 대한 자세한 내용은',
'Certificate Catalog': '인증서 카탈로그',
'Certificate Details': '인증서 세부사항',
'Certificate Status': '인증서 상태',
'Certificate added': '인증서 추가',
'Certificate deleted': '인증서가 삭제됨',
'Certificate updated': '인증서 갱신됨',
'Certificate': '인증',
'Certificates': '인증서',
'Certification Details': '인증 세부사항',
'Certification added': '인증 추가',
'Certification deleted': '인증 삭제',
'Certification updated': '인증 갱신',
'Certification': '인증',
'Certifications': '인증',
'Certifying Organization': '조직 인증',
'Change Password': '암호 변경',
'Check Request': '요청 확인',
'Check for errors in the URL, maybe the address was mistyped.': 'url 에 오류, maybe 주소를 잘못 확인하십시오.',
'Check if the URL is pointing to a directory instead of a webpage.': 'url 디렉토리 대신 웹 가리키는지 확인하십시오.',
'Check outbox for the message status': '메시지 상태를 outbox 확인',
'Check to delete': '삭제하려면 선택하십시오.',
'Checked': '확인',
'Checklist created': '체크리스트 작성',
'Checklist deleted': '삭제할 체크리스트',
'Checklist of Operations': '운영 점검 목록',
'Checklist updated': '갱신 점검',
'Checklist': '체크리스트',
'Chemical Hazard': '화학적 위험',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '화학적, 생물학적, 방사능, 핵무기 또는 고성능 폭발물 위협 또는 공격',
'Chicken': '닭고기',
'Child (2-11)': '하위 (2-11)',
'Child (< 18 yrs)': '하위 (< 18 세의)',
'Child Abduction Emergency': '하위 abduction 비상',
'Child headed households (<18 yrs)': '하위 머리 households (<18 세)',
'Child': '하위',
'Children (2-5 years)': '하위 (2-5 년)',
'Children (5-15 years)': '하위 (5-15 년)',
'Children (< 2 years)': '하위 (< 2 년)',
'Children in adult prisons': '하위 prisons 에 성인',
'Children in boarding schools': '하위 boarding 학교 에서',
'Children in homes for disabled children': '하위 홈은 의 하위에 대한 사용',
'Children in juvenile detention': '하위 juvenile detention)',
'Children in orphanages': '하위 orphanages 에',
'Children living on their own (without adults)': '하위 자신의 활성 (adults)',
'Children not enrolled in new school': '새 하위 학교 등록되어',
'Children orphaned by the disaster': '하위 피해 의해 분리되었으며',
'Children separated from their parents/caregivers': '하위에 상위/caregivers 구분됩니다',
'Children that have been sent to safe places': '안전한 위치에 전송된 하위',
'Children who have disappeared since the disaster': '누가 피해 이후 사라진 하위',
'Chinese (Taiwan)': '대만어',
'Cholera Treatment Capability': 'cholera 처리 기능',
'Cholera Treatment Center': 'cholera 진료 센터',
'Cholera Treatment': 'cholera 처리',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '새 평가 및 팀 판단에 따라 새 게시를 선택하십시오. 전체 빌드 영향을 미치는 심각한 조건을 통지일 안전하지 의 접지. 로컬화된 심각한 전반적인 중간 제한 조건을 사용해야 할 수도 있습니다. 작업공간 검사된 기본 시작 시 placard. post 모든 중요한 진입점을 전혀 다른 placards.',
'Christian': '서기',
'Church': '교회',
'City': 'city',
'Civil Emergency': 'civil 비상',
'Cladding, glazing': 'cladding, glazing',
'Click on the link %(url)s to reset your password': '링크를 누르십시오. %(url)s 사용자 암호 재설정',
'Click on the link %(url)s to verify your email': '링크를 누르십시오. %(url)s 사용자의 전자 검증하십시오',
'Clinical Laboratory': '임상 연구소',
'Clinical Operations': '임상 조작',
'Clinical Status': '임상 상태',
'Closed': '닫힘',
'Clothing': '의류',
'Cluster Details': '클러스터 세부사항',
'Cluster Distance': '클러스터 거리',
'Cluster Subsector Details': '클러스터 subsector 세부사항',
'Cluster Subsector added': '클러스터 하부영역 추가',
'Cluster Subsector deleted': '클러스터 하부영역 삭제',
'Cluster Subsector updated': '클러스터 subsector 갱신',
'Cluster Subsector': '클러스터 하부영역',
'Cluster Subsectors': '클러스터 subsectors',
'Cluster Threshold': '클러스터 임계값',
'Cluster added': '클러스터 추가',
'Cluster deleted': '클러스터 삭제',
'Cluster updated': '클러스터 갱신',
'Cluster(s)': '클러스터(들)',
'Clusters': '클러스터',
'Code': '코드',
'Cold Wave': '콜드 물결선',
'Collapse, partial collapse, off foundation': '접기, 부분 접기, foundation',
'Collective center': '콜렉티브에 center',
'Color for Underline of Subheadings': 'color 의 하위 underline 대한',
'Color of Buttons when hovering': '단추를 color 때 풍선',
'Color of bottom of Buttons when not pressed': '아래 단추 중 color 않을 때 눌렀습니다.',
'Color of bottom of Buttons when pressed': '아래 단추 중 color 때',
'Color of dropdown menus': '색상 드롭 다운 메뉴',
'Color of selected Input fields': 'color 선택한 입력 필드',
'Color of selected menu items': 'color 선택된 메뉴 항목',
'Columns, pilasters, corbels': '컬럼, pilasters, corbels',
'Combined Method': '결합 메소드',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '나중에 제공됩니다. 이 사이트를 방문하여 모든 는 동일한 문제점을 경험하는 것처럼.',
'Come back later.': '나중에 제공됩니다.',
'Commercial/Offices': 'commercial/사무실',
'Commit Date': '확약 날짜',
'Commit from %s': '커미트% s',
'Commit': '확약',
'Commit. Status': '확약 STATUS',
'Commiting a changed spreadsheet to the database': '데이터베이스에 변경된 스프레드시트 확약',
'Commitment Added': '추가 확약',
'Commitment Canceled': '확약 취소됨',
'Commitment Details': '위탁 세부사항',
'Commitment Item Details': '확약 항목 세부사항',
'Commitment Item added': '확약 항목 추가됨',
'Commitment Item deleted': '확약 항목 삭제',
'Commitment Item updated': '확약 항목 갱신',
'Commitment Items': '확약 항목',
'Commitment Status': '확약 상태',
'Commitment Updated': '확약 갱신',
'Commitment': '약정',
'Commitments': '약정',
'Committed By': '커미트된 의해',
'Committed': '커미트됨',
'Committing Inventory': '자원 확약',
'Communication problems': '통신 문제점',
'Community Centre': '커뮤니티 centre',
'Community Health Center': '커뮤니티 health center',
'Community Member': '커뮤니티 구성원',
'Competencies': '능력',
'Competency Details': '자격 세부사항',
'Competency Rating Catalog': '능력 등급 카탈로그',
'Competency Rating Details': '능력 평가 세부사항',
'Competency Rating added': '능력 등급 추가됩니다',
'Competency Rating deleted': '능력 등급 삭제됨',
'Competency Rating updated': '능력 등급 갱신',
'Competency Ratings': '정격 능력',
'Competency added': '능력 추가',
'Competency deleted': '능력 삭제',
'Competency updated': '갱신된 능력',
'Competency': '능력',
'Complete': 'COMPLETE(완료)',
'Completed': '완료됨',
'Compose': '작성',
'Compromised': '손상됨',
'Concrete frame': '콘크리트 프레임',
'Concrete shear wall': '콘크리트 절단 벽',
'Condition': '조건',
'Configurations': '구성',
'Configure Run-time Settings': '런타임 구성 설정',
'Confirm Shipment Received': '수신된 운송물 확인',
'Confirmed': '확인됨',
'Confirming Organization': '조직 확인',
'Conflict Details': '충돌 세부사항',
'Conflict Resolution': '충돌 해결',
'Consignment Note': '상품인지를 주',
'Constraints Only': '제한조건은',
'Consumable': '소비재',
'Contact Data': '데이터 문의하십시오.',
'Contact Details': '연락처 세부사항',
'Contact Info': '연락처 정보',
'Contact Information Added': '문의처 정보 추가',
'Contact Information Deleted': '정보는 삭제된 담당자',
'Contact Information Updated': '갱신된 접속 정보를',
'Contact Information': '연락처 정보',
'Contact Method': '연락 방법',
'Contact Name': '담당자 이름',
'Contact Person': '개인 연락처',
'Contact Phone': '담당자 전화',
'Contact details': '연락처 세부사항',
'Contact information added': '추가된 연락처 정보',
'Contact information deleted': '정보는 삭제된 담당자',
'Contact information updated': '갱신된 접속 정보를',
'Contact us': '문의',
'Contact': '연락처',
'Contacts': '연락처',
'Contents': '내용',
'Contributor': '기고자',
'Conversion Tool': '변환 도구',
'Cooking NFIs': '요리용 NFI',
'Cooking Oil': '요리용 오일',
'Coordinate Conversion': '좌표 변환',
'Coping Activities': '활동 복사',
'Copy': '복사',
'Corn': '옥수수',
'Cost Type': '비용 유형',
'Cost per Megabyte': '비용 한계',
'Cost per Minute': '분당 비용',
'Country of Residence': '거주 국가',
'Country': '국가',
'County': 'County(US 전용)',
'Course Catalog': '과정 카탈로그',
'Course Certificate Details': 'certicate 과정 세부사항',
'Course Certificate added': 'certicate 코스 추가',
'Course Certificate deleted': '코스 삭제 certicate',
'Course Certificate updated': '물론 certicate 갱신',
'Course Certificates': '과정 인증서',
'Course Details': '과정 세부사항',
'Course added': '과정 추가',
'Course deleted': '코스 삭제',
'Course updated': '과정 갱신',
'Course': '과정',
'Courses': '과정',
'Create & manage Distribution groups to receive Alerts': '작성 및 관리 경고를 수신하도록 분배 그룹',
'Create Activity Report': '활동 보고서 추가',
'Create Activity Type': '추가 활동 유형',
'Create Activity': '단위업무 추가',
'Create Asset': '자산 추가',
'Create Bed Type': '추가 bed 유형',
'Create Brand': '브랜드 추가',
'Create Budget': '예산 추가',
'Create Catalog Item': '카탈로그 항목 추가',
'Create Catalog': '카탈로그 추가',
'Create Certificate': '인증 추가',
'Create Checklist': '체크리스트 작성',
'Create Cholera Treatment Capability Information': 'cholera treatment 기능 정보 추가',
'Create Cluster Subsector': '클러스터 subsector 추가',
'Create Cluster': '클러스터 추가',
'Create Competency Rating': '능력 등급 추가',
'Create Contact': '연락처 추가',
'Create Course': '과정 추가',
'Create Dead Body Report': '데드 본문 보고서 추가',
'Create Event': '새 이벤트 작성',
'Create Facility': '기능 추가',
'Create Feature Layer': '추가 기능)',
'Create Group Entry': '그룹 항목 작성',
'Create Group': '그룹 추가',
'Create Hospital': '추가 병원',
'Create Identification Report': '식별 보고서 추가',
'Create Impact Assessment': '영향 평가 작성',
'Create Incident Report': '인시던트 보고서 추가',
'Create Incident': '추가 인시던트',
'Create Item Category': '항목에 카테고리 추가',
'Create Item Pack': '항목 팩 추가',
'Create Item': '새 항목 추가',
'Create Kit': '새 상품 추가',
'Create Layer': '계층 추가',
'Create Location': '위치 추가',
'Create Map Configuration': '맵 구성 추가',
'Create Marker': '마커 추가',
'Create Mobile Impact Assessment': '모바일 영향 평가 작성',
'Create Office': '추가 사무실',
'Create Organization': '조직 추가',
'Create Personal Effects': '개인 효과 추가',
'Create Project': '새 프로젝트 추가',
'Create Project': '프로젝트 추가',
'Create Projection': '추가 투영',
'Create Rapid Assessment': '신속한 평가 생성하기',
'Create Reference Document': '참조 문서 추가',
'Create Request': '요청 생성하기',
'Create Resource': '자원 추가',
'Create Role': '역할 추가',
'Create Room': '강의실 추가',
'Create Scenario': '새 시나리오 작성',
'Create Sector': '섹터를 추가',
'Create Service Profile': '서비스 프로파일 추가',
'Create Shelter Service': '추가 shelter 서비스',
'Create Shelter Type': 'shelter 유형 추가',
'Create Shelter': '추가 shelter',
'Create Skill Type': '추가 기술 항목 유형',
'Create Skill': '스킬 추가',
'Create Staff Member': '스태프 구성원 추가',
'Create Status': '상태 추가',
'Create Task': '작업 생성하기',
'Create Task': '태스크 추가',
'Create Theme': '테마 추가',
'Create User': '사용자 추가',
'Create Volunteer': '지원자 추가',
'Create Warehouse': '웨어하우스 추가',
'Create a Person': '개인 추가',
'Create a group entry in the registry.': '레지스트리에 있는 그룹 항목을 작성하십시오.',
'Create, enter, and manage surveys.': '작성, 입력, 관리하는 조사합니다.',
'Creation of Surveys': '설문 생성하기',
'Credential Details': '신임 세부사항',
'Credential added': 'Credential 추가',
'Credential deleted': 'Credential 삭제',
'Credential updated': '신임 갱신',
'Credentialling Organization': 'credentialling 조직',
'Credentials': '신임',
'Credit Card': '신용 카드',
'Crime': '범죄',
'Criteria': '기준',
'Currency': '통화',
'Current Entries': '현재 항목',
'Current Group Members': '현재 그룹 구성원',
'Current Identities': '현재 id',
'Current Location': '현재 위치',
'Current Log Entries': '현재 로그 항목',
'Current Memberships': '현재 멤버쉽',
'Current Records': '현재 레코드',
'Current Registrations': '현재 등록',
'Current Status': '현재 상태',
'Current Team Members': '현재 팀 구성원',
'Current Twitter account': '현재 twitter 계정',
'Current community priorities': '현재 커뮤니티 우선순위',
'Current general needs': '현재 일반 합니다',
'Current greatest needs of vulnerable groups': '현재 가장 필요한 취약한 그룹',
'Current health problems': '현재 성능 문제점',
'Current number of patients': '현재 환자 중',
'Current problems, categories': '현재 문제점, 카테고리',
'Current problems, details': '현재 문제점, 세부사항',
'Current request': '현재 요청',
'Current response': '현재 응답',
'Current session': '현재 세션',
'Currently no Certifications registered': '현재 등록된 인증서가 없습니다',
'Currently no Competencies registered': '현재 등록된 능력 항목이 없습니다',
'Currently no Course Certificates registered': '현재 등록된 교육과정 인증서가 없습니다',
'Currently no Credentials registered': '현재 등록된 신용(신분, 자격) 증명서가 없습니다',
'Currently no Missions registered': '현재 등록된 임무가 없습니다',
'Currently no Skill Equivalences registered': '현재 등록된 기술 종류가 없습니다',
'Currently no Trainings registered': 'trainings 현재 등록된',
'Currently no entries in the catalog': '현재 카탈로그에 항목이 없습니다',
'DNA Profile': '프로파일 dna',
'DNA Profiling': 'dna 프로파일링',
'Dam Overflow': 'dam 오버플로우',
'Damage': '손상',
'Dangerous Person': '위험한 사람',
'Dashboard': '대시보드',
'Data uploaded': '데이터 업로드',
'Data': '데이터',
'Database': '데이터베이스',
'Date & Time': '날짜 및 시간',
'Date Available': '운송 가능 날짜',
'Date Received': '수령 날짜',
'Date Requested': '요청된 날짜',
'Date Required': '요청 날짜',
'Date Sent': '날짜 송신',
'Date Until': '날짜',
'Date and Time': '날짜 및 시간',
'Date and time this report relates to.': '이 보고서는 날짜 및 시간 관련시킵니다.',
'Date of Birth': '생일',
'Date of Latest Information on Beneficiaries Reached': '날짜 받아야 에 대한 최신 정보',
'Date of Report': '보고서 날짜',
'Date/Time of Find': '날짜/시간 찾기',
'Date/Time when found': '날짜/시간 때',
'Date/Time when last seen': '날짜/시간 때 마지막으로 표시된',
'Date/Time': '날짜/시간',
'Dead Body Details': '데드 본문 세부사항',
'Dead Body Reports': '데드 본문 보고서',
'Dead Body': '데드 본문',
'Dead body report added': '데드 본문 보고서 추가',
'Dead body report deleted': '데드 본문 보고서 삭제',
'Dead body report updated': '데드 본문 보고서 갱신',
'Deaths in the past 24h': 'deaths 지난 24h',
'Decimal Degrees': '10진수(도)',
'Decision': '결정',
'Decomposed': '분해될',
'Default Height of the map window.': '기본 맵 창의 높이.',
'Default Map': '기본 맵',
'Default Marker': '디폴트 마커',
'Default Width of the map window.': '맵 창의 기본 너비.',
'Default synchronization policy': '기본 동기화 정책',
'Defecation area for animals': 'defecation 영역에 대한 동물',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': '해당 자원 (인력, 자산 및 설비) 의 할당 시나리오를 정의하십시오.',
'Defines the icon used for display of features on handheld GPS.': '휴대용 gps 의 기능을 표시하기 위해 사용되는 아이콘을 정의합니다.',
'Defines the icon used for display of features on interactive map & KML exports.': '대화식 맵 및 kml 내보내기 기능을 표시하기 위해 사용되는 아이콘을 정의합니다.',
'Defines the marker used for display & the attributes visible in the popup.': '표시 및 볼 수 있는 팝업에서 속성에 대해 사용된 마커를 정의합니다.',
'Degrees must be a number between -180 and 180': '도 사이의-180 및 180 숫자여야 합니다.',
'Dehydration': '디하이드레이션',
'Delete Alternative Item': '대안 항목 삭제',
'Delete Assessment Summary': '평가 요약을 삭제',
'Delete Assessment': '평가 삭제',
'Delete Asset Log Entry': '자산 삭제 로그 항목',
'Delete Asset': '자산 삭제',
'Delete Baseline Type': '삭제할 기준선 유형',
'Delete Baseline': '기준선 삭제',
'Delete Brand': '브랜드 삭제',
'Delete Budget': '예산 삭제',
'Delete Bundle': '번들 삭제',
'Delete Catalog Item': '목록 항목 삭제',
'Delete Catalog': '카탈로그 삭제',
'Delete Certificate': '인증서 삭제',
'Delete Certification': '인증 삭제',
'Delete Cluster Subsector': '클러스터 삭제 subsector',
'Delete Cluster': '클러스터 삭제',
'Delete Commitment Item': '삭제 확약 항목',
'Delete Commitment': '삭제 확약',
'Delete Competency Rating': '삭제할 능력 등급',
'Delete Competency': '능력 삭제',
'Delete Contact Information': '연락처 정보 삭제',
'Delete Course Certificate': '과정 certicate 삭제',
'Delete Course': '코스 삭제',
'Delete Credential': '권한 정보 삭제',
'Delete Document': '문서 삭제',
'Delete Donor': 'doner 삭제',
'Delete Entry': '항목 삭제',
'Delete Event': '이벤트 삭제',
'Delete Feature Layer': '삭제 기능을 layer',
'Delete Group': '그룹 삭제',
'Delete Hospital': '삭제할 병원',
'Delete Image': '이미지 삭제',
'Delete Impact Type': '삭제 영향 유형',
'Delete Impact': '영향 삭제',
'Delete Incident Report': '인시던트 보고서 삭제',
'Delete Item Category': '항목 카테고리 삭제',
'Delete Item Pack': '항목 팩 삭제',
'Delete Item': '항목 삭제',
'Delete Job Role': '작업 역할 삭제',
'Delete Key': '키 삭제',
'Delete Kit': 'delete kit',
'Delete Layer': '레이어 삭제',
'Delete Level 1 Assessment': '레벨 1 평가 삭제',
'Delete Level 2 Assessment': '레벨 2 평가 삭제',
'Delete Location': '위치 삭제',
'Delete Map Configuration': '맵 구성 삭제',
'Delete Marker': '마커 삭제',
'Delete Membership': '멤버쉽 삭제',
'Delete Message': '메시지 삭제',
'Delete Mission': '삭제할 임무',
'Delete Need Type': '삭제 하는 유형',
'Delete Need': '삭제 합니다',
'Delete Office': '삭제할 사무실',
'Delete Organization': '조직 삭제',
'Delete Peer': '피어 삭제',
'Delete Person': '작업자 삭제',
'Delete Photo': '사진 삭제',
'Delete Population Statistic': '인구 통계 삭제',
'Delete Position': '삭제 위치',
'Delete Project': '프로젝트 삭제',
'Delete Projection': '프로젝션 삭제',
'Delete Rapid Assessment': '빠른 평가 삭제',
'Delete Received Item': '수신된 삭제 항목',
'Delete Received Shipment': '수신된 shipment 삭제',
'Delete Record': '레코드 삭제',
'Delete Report': '보고서 삭제',
'Delete Request Item': '삭제 요청을 항목',
'Delete Request': '요청 삭제',
'Delete Resource': '자원 삭제',
'Delete Room': '강의실 삭제',
'Delete Scenario': '시나리오 삭제',
'Delete Section': '섹션 삭제',
'Delete Sector': '삭제할 섹터',
'Delete Sent Item': '삭제할 보낸 항목',
'Delete Sent Shipment': '송신된 shipment 삭제',
'Delete Service Profile': '서비스 프로파일 삭제',
'Delete Setting': '설정 삭제',
'Delete Skill Equivalence': '기술 equivalence 삭제',
'Delete Skill Provision': '삭제할 기술 제공',
'Delete Skill Type': '삭제할 항목 유형',
'Delete Skill': '스킬 삭제',
'Delete Staff Type': 'delete 직원 유형',
'Delete Status': '삭제 상태',
'Delete Subscription': '등록 삭제',
'Delete Subsector': '삭제 subsector',
'Delete Survey Answer': '삭제할 서베이 응답',
'Delete Survey Question': '서베이 질문 삭제',
'Delete Survey Series': '삭제할 서베이 시리즈',
'Delete Survey Template': '서베이 템플리트 삭제',
'Delete Training': '연계 삭제',
'Delete Unit': '단위 삭제',
'Delete User': '사용자 삭제',
'Delete Volunteer': 'delete 지원자',
'Delete from Server?': '서버에서?',
'Delete': '삭제',
'Delphi Decision Maker': 'delphi 결정',
'Demographic': '데모그래픽',
'Demonstrations': '데모',
'Dental Examination': 'dental 검사',
'Dental Profile': 'dental 프로파일',
'Describe the condition of the roads to your hospital.': '도로, 귀하의 병원 조건을 설명합니다.',
'Describe the procedure which this record relates to (e.g. "medical examination")': '이 레코드 (예: \\ " 의학 examination\\ " 과) 프로시저를 설명합니다',
'Description of Contacts': '문의처 설명',
'Description of defecation area': 'defecation 영역의 설명',
'Description of drinking water source': '설명 식수 (소스)',
'Description of sanitary water source': '설명 오수관 (소스)',
'Description of water source before the disaster': '물 소스 설명 피해 전',
'Desire to remain with family': '원하는 계열과의 남아',
'Destination': '대상',
'Destroyed': '파괴',
'Details field is required!': '세부사항 필드는 필수입니다!',
'Details': '세부사항',
'Diaphragms, horizontal bracing': 'diaphragms, 수평 bracing',
'Dignitary Visit': 'dignitary 방문하십시오.',
'Direction': '방향',
'Disable': '사용 불가능',
'Disabled participating in coping activities': '사용 활동을 복사하는 참여',
'Disabled': '사용 불가능',
'Disabled?': '사용?',
'Disaster Victim Identification': '재해 victim 식별',
'Disaster Victim Registry': '재해 victim 레지스트리',
'Disaster clean-up/repairs': '피해 up/repairs 정리',
'Discharges/24hrs': '소모됨/24hrs',
'Discussion Forum on item': '토론 포럼 항목',
'Discussion Forum': '토론 포럼',
'Disease vectors': '질병 벡터',
'Dispensary': 'dispensary',
'Displaced Populations': '프로덕트를 모집단의',
'Displaced': '프로덕트를',
'Display Polygons?': '다각형 표시?',
'Display Routes?': '표시할 라우트?',
'Display Tracks?': '표시 추적합니다?',
'Display Waypoints?': '표시 waypoints?',
'Distance between defecation area and water source': '거리 defecation 영역 및 물 소스 사이의',
'Distance from %s:': '% 의 거리:',
'Distance(Kms)': '거리 (kms)',
'Distribution groups': '분배 그룹',
'Distribution': '배포판',
'District': '특별지방자치단체',
'Do you really want to delete these records?': '이 레코드를 삭제하시겠습니까?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': '이 받은 shipment 취소하시겠습니까? 인벤토리에서 항목이 제거됩니다. 이 조치는 실행 취소할 수 없습니다.',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '이 shipment 보낸 취소하시겠습니까? 이 항목은 명세로 리턴됩니다. 이 조치는 실행 취소할 수 없습니다.',
'Do you want to receive this shipment?': '이 shipment?',
'Do you want to send these Committed items?': '이 확약된 항목을 보내시겠습니까?',
'Do you want to send this shipment?': '이 shipment 보내시겠습니까?',
'Document Details': '문서 세부사항',
'Document Scan': '문서 스캔',
'Document added': '문서 추가',
'Document deleted': '문서가 삭제됨',
'Document updated': '문서가 갱신됨',
'Documents and Photos': '문서 및 사진',
'Documents': '문서',
'Does this facility provide a cholera treatment center?': '이 시설이 콜레라 진료 센터를 제공합니까?',
'Doing nothing (no structured activity)': '지원 불필요 (조직적 활동 없음)',
'Dollars': '달러',
'Domain': '도메인(domain)',
'Domestic chores': '국내 chores',
'Donated': '기부',
'Donation Certificate': 'donation 인증',
'Donation Phone #': 'donation 전화번호 #',
'Donor Details': '제공자 세부사항',
'Donor added': '추가 공여',
'Donor deleted': 'doner 삭제됨',
'Donor updated': 'doner 갱신된',
'Donor': 'doner',
'Donors Report': 'donors 보고서',
'Door frame': '프레임 도어를',
'Download PDF': 'PDF 다운로드',
'Draft': 'DRAFT(초안)',
'Drainage': '드레인',
'Drawing up a Budget for Staff & Equipment across various Locations.': '직원 및 장비와 예산을 다양한 위치에 그림.',
'Drill Down by Group': '드릴 다운 그룹별',
'Drill Down by Incident': '드릴 다운 의해 인시던트',
'Drill Down by Shelter': '드릴 다운 shelter 의해',
'Driving License': '운전 면허증',
'Dug Well': 'dug 아니라',
'Duplicate?': '중복?',
'Duration': 'DURATION',
'Dust Storm': '심한 먼지',
'EMS Reason': 'ems 이유',
'EMS Status': 'ems 상태',
'ER Status Reason': 'ER 상태 원인',
'ER Status': 'ER 상태',
'Early Recovery': '빠른 복구',
'Earthquake': '지진',
'Edit Activity': '활동 편집',
'Edit Address': '주소 편집',
'Edit Alternative Item': '대체 항목 편집',
'Edit Application': '애플리케이션 편집',
'Edit Assessment Summary': '평가 요약 편집',
'Edit Assessment': '평가 편집',
'Edit Asset Log Entry': '자산 로그 항목 편집',
'Edit Asset': '자산 편집',
'Edit Baseline Type': '편집할 기준선 유형',
'Edit Baseline': '기준선 편집',
'Edit Brand': '브랜드 편집',
'Edit Budget': '예산 편집',
'Edit Bundle': '번들 편집',
'Edit Camp Service': '자녀를 서비스 편집',
'Edit Camp Type': '자녀를 유형 편집',
'Edit Camp': '자녀를 편집',
'Edit Catalog Item': '카탈로그 항목 편집',
'Edit Catalog': '카탈로그 편집',
'Edit Certificate': '인증서 편집',
'Edit Certification': '인증 편집',
'Edit Cluster Subsector': '클러스터 subsector 편집',
'Edit Cluster': '클러스터 편집',
'Edit Commitment Item': '편집 확약 항목',
'Edit Commitment': '편집 확약',
'Edit Competency Rating': '능력 등급 편집',
'Edit Competency': '자격 편집',
'Edit Contact Information': '연락처 정보 편집',
'Edit Contact': '연락처 편집',
'Edit Contents': '컨텐츠 편집',
'Edit Course Certificate': 'certicate 코스 편집',
'Edit Course': '코스 편집',
'Edit Credential': '신임 편집',
'Edit Dead Body Details': '데드 본문 세부사항 편집',
'Edit Description': '설명 편집',
'Edit Details': '세부사항 편집',
'Edit Disaster Victims': '피해 희생 편집',
'Edit Document': '문서 편집',
'Edit Donor': '편집 제공자',
'Edit Email Settings': '이메일 설정 편집',
'Edit Entry': '항목 편집',
'Edit Event': '이벤트 편집',
'Edit Facility': '기능 편집',
'Edit Feature Layer': '편집 기능)',
'Edit Flood Report': '플러드 보고서 편집',
'Edit Gateway Settings': '게이트웨이 설정 편집',
'Edit Group': '그룹 편집',
'Edit Hospital': '병원 편집',
'Edit Human Resource': '인적 자원 편집',
'Edit Identification Report': '식별 보고서 편집',
'Edit Identity': 'id 편집',
'Edit Image Details': '이미지 세부사항 편집',
'Edit Impact Type': '편집 유형 영향',
'Edit Impact': '영향 편집',
'Edit Incident Report': '인시던트 보고서 편집',
'Edit Inventory Item': '재고 항목 편집',
'Edit Item Category': '항목 카테고리 편집',
'Edit Item Pack': '항목 팩 편집',
'Edit Item': '항목 편집',
'Edit Job Role': '작업 역할 편집',
'Edit Key': '키 편집',
'Edit Kit': '키트 편집',
'Edit Layer': '계층 편집',
'Edit Level %d Locations?': '레벨% d 위치 편집?',
'Edit Level 1 Assessment': '레벨 1 평가 편집',
'Edit Level 2 Assessment': '레벨 2 평가 편집',
'Edit Location': '위치 편집',
'Edit Log Entry': '로그 항목 편집',
'Edit Map Configuration': '맵 구성 편집',
'Edit Map Services': '맵 서비스 편집',
'Edit Marker': '마커 편집',
'Edit Membership': '멤버십 편집',
'Edit Message': '메시지 편집',
'Edit Messaging Settings': '메시징 설정 편집',
'Edit Mission': '임무 편집',
'Edit Modem Settings': '모뎀 설정 편집',
'Edit Need Type': '필요한 유형 편집',
'Edit Need': '필요한 편집',
'Edit Office': '부재 편집',
'Edit Options': '편집 옵션',
'Edit Organization': '조직 편집',
'Edit Parameters': '매개변수 편집',
'Edit Peer Details': '피어 세부사항 편집',
'Edit Person Details': '편집할 사용자 세부사항',
'Edit Personal Effects Details': '개인 효과 세부사항 편집',
'Edit Photo': '사진 편집',
'Edit Population Statistic': '인구 통계 편집',
'Edit Position': '직위 편집',
'Edit Problem': '문제점 편집',
'Edit Project': '편집 프로젝트',
'Edit Projection': '프로젝션 편집',
'Edit Rapid Assessment': '빠른 평가 편집',
'Edit Received Item': '수신된 항목 편집',
'Edit Received Shipment': '수신된 shipment 편집',
'Edit Record': '레코드 편집',
'Edit Registration Details': '등록 세부사항 편집',
'Edit Registration': '등록 편집',
'Edit Request Item': '편집 항목 요청',
'Edit Request': '요청 편집',
'Edit Resource': '자원 편집',
'Edit River': '편집 강',
'Edit Role': '역할 편집',
'Edit Room': '미팅룸 편집',
'Edit Scenario': '시나리오 편집',
'Edit Sector': '편집 부문',
'Edit Sent Item': '보낸 항목 편집',
'Edit Setting': '설정 편집',
'Edit Settings': '설정 편집',
'Edit Shelter Service': 'shelter 서비스 편집',
'Edit Shelter Type': '편집 shelter 유형',
'Edit Shelter': '편집 shelter',
'Edit Skill Equivalence': '기술 equivalence 편집',
'Edit Skill Provision': '기술 provision 편집',
'Edit Skill Type': '편집 기술 유형',
'Edit Skill': '기술 항목 편집',
'Edit Solution': '솔루션 편집',
'Edit Staff Type': '편집 직원 유형',
'Edit Subscription': '등록 편집',
'Edit Subsector': '편집 subsector',
'Edit Survey Answer': '서베이 응답 편집',
'Edit Survey Question': '서베이 질문 편집',
'Edit Survey Series': '서베이 시리즈 편집',
'Edit Survey Template': '서베이 템플리트 편집',
'Edit Task': '태스크 편집',
'Edit Team': '팀 편집',
'Edit Theme': '테마 편집',
'Edit Themes': '테마 편집',
'Edit Ticket': '티켓 편집',
'Edit Track': '트랙 편집',
'Edit Training': '교육 편집',
'Edit Tropo Settings': 'tropo 설정 편집',
'Edit User': '사용자 편집',
'Edit Volunteer Availability': '지원자 가용성 편집',
'Edit Volunteer Details': '지원자 세부사항 편집',
'Edit Warehouse': '웨어하우스 편집',
'Edit current record': '현재 레코드 편집',
'Edit message': '메시지 편집',
'Edit': '편집',
'Editable?': '편집 가능 여부',
'Education materials received': '교육 자료 수신',
'Education materials, source': '교육 자료, 소스',
'Education': '교육',
'Effects Inventory': '자원 명세 효과',
'Eggs': '계란',
'Either a shelter or a location must be specified': '는 shelter 또는 위치를 지정해야 합니다.',
'Either file upload or document URL required.': '파일 업로드 또는 필요한 문서의 url.',
'Either file upload or image URL required.': '파일 업로드 또는 필요한 이미지 url.',
'Elderly person headed households (>60 yrs)': '친인척 개인 households (>60 세) 방향',
'Electrical': '전기',
'Electrical, gas, sewerage, water, hazmats': '전기, 가스, sewerage, 물, hazmats',
'Elevators': '엘리베이터',
'Email Address': '이메일 주소',
'Email Settings': '이메일 설정',
'Email settings updated': '이메일 설정 갱신',
'Email': '이메일',
'Embalming': 'embalming',
'Embassy': 'embassy',
'Emergency Capacity Building project': '용량 비상 프로젝트 빌드',
'Emergency Department': '긴급 department',
'Emergency Shelter': '비상 shelter',
'Emergency Support Facility': '긴급 지원 기능',
'Emergency Support Service': '긴급 지원 서비스',
'Emergency Telecommunications': 'telecommunications 비상',
'Enable/Disable Layers': '계층 사용/사용 안함',
'Enabled': '사용 가능',
'End Date': '종료 날짜',
'End date should be after start date': '종료 날짜는 시작 날짜 이후여야 합니다',
'End date': '종료 날짜',
'End of Period': '기간의 끝',
'Enter Coordinates:': '좌표를 입력하십시오.',
'Enter a GPS Coord': 'gps 좌표 입력',
'Enter a name for the spreadsheet you are uploading (mandatory).': '스프레드시트 대한 이름 (업로드하는) 입력하십시오.',
'Enter a new support request.': '새 지원 요청을 입력하십시오.',
'Enter a unique label!': '고유한 레이블을 입력하십시오!',
'Enter a valid date before': '올바른 날짜 입력',
'Enter a valid email': '올바른 전자 우편.',
'Enter a valid future date': '올바른 미래 날짜를 입력하십시오',
'Enter some characters to bring up a list of possible matches': '일부 문자를 일치사항이 목록을 표시하십시오.',
'Enter some characters to bring up a list of possible matches.': '일부 문자를 일치사항이 목록을 표시하십시오.',
'Enter tags separated by commas.': '쉼표로 분리된 태그를 입력하십시오.',
'Enter the same password as above': '위와 동일한 암호를 입력하십시오.',
'Entered': '입력됨',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '전화번호를 입력하는 선택적이지만, 그렇게 sms 메시지를 수신하도록 등록할 수 있습니다.',
'Entry deleted': '항목 삭제됨',
'Environment': '환경',
'Equipment': '장비',
'Error encountered while applying the theme.': '오류가 있는 테마를 적용하여 발견했습니다.',
'Error in message': '오류 메시지',
'Error logs for "%(app)s"': '오류 로그를 "%(app)s"',
'Errors': '오류',
'Est. Delivery Date': 'est. 전달 날짜',
'Estimated # of households who are affected by the emergency': '긴급 재난 상황에 영향을 받는 예상 가구수',
'Estimated # of people who are affected by the emergency': '긴급 재난 상황에 영향을 받는 예상 인원',
'Estimated Overall Building Damage': '예상되는 전체 빌딩 손실',
'Estimated total number of people in institutions': '총 예상 기관 사용자 수',
'Euros': 'euros',
'Evacuating': 'evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': '이 메시지의 정보를 평가하십시오. (이 값은 공용 경고 응용프로그램에서 사용해야 합니다. )',
'Event Details': '이벤트 세부사항',
'Event added': '이벤트 추가',
'Event deleted': '이벤트 삭제',
'Event updated': '이벤트가 업데이트되었습니다.',
'Event': '이벤트',
'Events': '이벤트',
'Example': '예제',
'Exceeded': '초과됨',
'Excellent': '최상',
'Exclude contents': '제외할 내용',
'Excreta disposal': 'excreta 폐기',
'Execute a pre-planned activity identified in <instruction>': '식별된 사전 계획된 활동 실행<instruction>',
'Exercise': '연습',
'Exercise?': '연습?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': '연습 모드에서는 모든 화면에 워터마크가 표시되고 모든 알림에 접두부가 붙습니다.',
'Existing Placard Type': '기존 표지판 유형',
'Existing food stocks': '기존 식량 재고',
'Existing location cannot be converted into a group.': '기존 위치는 그룹으로 변환할 수 없습니다.',
'Exits': '엑시트',
'Experience': '경력',
'Expiry Date': '만료 날짜',
'Explosive Hazard': '폭발 위험',
'Export Data': '데이터 내보내기',
'Export Database as CSV': '데이터베이스를 CSV로 내보내기',
'Export in GPX format': 'GPX 형식으로 내보내기',
'Export in KML format': 'KML 형식으로 내보내기',
'Export in OSM format': 'OSM 형식으로 내보내기',
'Export in PDF format': 'PDF 형식으로 내보내기',
'Export in RSS format': 'RSS 형식으로 내보내기',
'Export in XLS format': 'XLS 형식으로 내보내기',
'Export': '내보내기',
'Exterior Only': '외부 전용',
'Exterior and Interior': '외부 및 내부',
'Eye Color': '눈 색상',
'Facebook': '페이스북',
'Facial hair, color': '얼굴 털, 색상',
'Facial hair, type': '얼굴 털, 모양',
'Facial hear, length': '얼굴 털, 길이',
'Facilities': '시설',
'Facility Details': '시설 세부사항',
'Facility Operations': '시설 운영',
'Facility Status': '시설 상태',
'Facility Type': '시설 유형',
'Facility added': '시설 추가됨',
'Facility or Location': '시설 또는 위치',
'Facility removed': '시설 제거됨',
'Facility updated': '시설 갱신됨',
'Fail': '실패',
'Failed!': '실패!',
'Fair': '양호',
'Falling Object Hazard': '낙하물 위험',
'Families/HH': '가족/가구',
'Family tarpaulins received': '가족용 방수포 수령',
'Family tarpaulins, source': '가족용 방수포, 출처',
'Family': '가족',
'Family/friends': '가족/친구',
'Farmland/fishing material assistance, Rank': '농지/어업 자재 지원, 순위',
'Fax': '팩스',
'Feature Layer Details': '기능 계층 세부사항',
'Feature Layer added': '기능 계층 추가',
'Feature Layer deleted': '기능 계층 삭제',
'Feature Layer updated': 'feature layer 갱신된',
'Feature Layers': '기능 계층',
'Feature Namespace': '기능 이름',
'Feature Request': '기능 요청',
'Feature Type': '기능 유형',
'Features Include': '기능 포함',
'Female headed households': '여성 가장 가구',
'Few': '몇 가지',
'Field Hospital': '필드 병원',
'Field': '필드',
'File': '파일',
'Fill in Latitude': '위도를 입력하십시오',
'Fill in Longitude': '경도를 입력하십시오',
'Filter Field': '필터 필드',
'Filter Value': '필터 값',
'Filter': '필터',
'Find Dead Body Report': '시신 보고서 찾기',
'Find Hospital': '병원 찾기',
'Find Person Record': '개인 정보 찾기',
'Find Volunteers': '자원봉사자 찾기',
'Find a Person Record': '개인 레코드 찾기',
'Find': '찾기',
'Finder': '파인더',
'Fingerprint': '지문',
'Fingerprinting': '지문 분석',
'Fingerprints': '지문',
'Finished Jobs': '완료된 작업',
'Fire suppression and rescue': '화재 진압 및 이동',
'Fire': '화재',
'First Name': '이름',
'First name': '이름',
'Fishing': '어업',
'Flash Flood': '돌발 홍수',
'Flash Freeze': '급속 동결',
'Flexible Impact Assessments': '유연한 영향 평가',
'Flood Alerts show water levels in various parts of the country': '홍수 경보는 국가 여러 지역의 수위를 표시합니다',
'Flood Alerts': '홍수 경보',
'Flood Report Details': '플러드 보고서 세부사항',
'Flood Report added': '플러드 보고서 추가',
'Flood Report deleted': '플러드 보고서 삭제',
'Flood Report updated': '플러드 보고서 갱신',
'Flood Report': '플러드 보고서',
'Flood Reports': '플러드 보고서',
'Flood': '홍수',
'Flow Status': '플로우 상태',
'Fog': '안개',
'Food Supply': '식품 공급',
'Food assistance': '식품 지원',
'Food': '음식',
'Footer file %s missing!': '바닥글 파일 %s이(가) 누락되었습니다!',
'Footer': '바닥글',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '국가의 경우 ISO2 코드이고, 도시의 경우 공항 Locode가 됩니다.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': '각 동기화 파트너에 대해 지정된 시간 간격 후에 실행되는 기본 동기화 작업이 있습니다. 필요에 따라 사용자 정의할 수 있는 동기화 작업을 추가로 설정할 수도 있습니다. 시작하려면 오른쪽에 있는 링크를 누르십시오.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': '보안 강화를 위해 사용자 이름과 암호를 입력하는 것이 좋으며, 조직 내 다른 시스템의 관리자에게 동기화 -> Sync Partners에서 사용자의 UUID에 대해 이 사용자 이름과 암호를 추가하도록 알리십시오',
'For live help from the Sahana community on using this application, go to': 'Sahana 커뮤니티로부터 이 응용프로그램 사용에 대한 실시간 도움말을 받으려면 다음으로 이동하십시오',
'For messages that support alert network internal functions': '경보 네트워크 내부 기능을 지원하는 메시지',
'For more details on the Sahana Eden system, see the': 'Sahana Eden 시스템에 대한 자세한 내용은 다음을 참조하십시오',
'For more information, see': '자세한 정보는 의 내용을 참조하십시오.',
'For': '예를 들어,',
'Forest Fire': '산불',
'Formal camp': '정규 캠프',
'Format': '형식',
'Forms': '양식',
'Found': '발견됨',
'Foundations': '기초',
'Freezing Drizzle': '어는 이슬비',
'Freezing Rain': '어는 비',
'Freezing Spray': '어는 물보라',
'French': '프랑스어',
'Friday': '금요일',
'From Inventory': '이전 재고',
'From Location': '원래 위치',
'From Organization': '원래 조직',
'From': '시작',
'Frost': '서리',
'Fulfil. Status': '이행 상태',
'Fulfillment Status': '이행 상태',
'Full beard': '전체 수염',
'Full': '가득참',
'Fullscreen Map': '전체 화면 맵',
'Functions available': '사용 가능한 기능',
'Funding Organization': '자금 조직',
'Funeral': 'funeral',
'Further Action Recommended': '권장 조치',
'GIS Reports of Shelter': '대피소의 GIS 보고서',
'GIS integration to view location details of the Shelter': '대피소의 위치 세부사항을 보기 위한 GIS 통합',
'GPS Marker': 'GPS 마커',
'GPS Track File': 'GPS 추적 파일',
'GPS Track': 'GPS 추적',
'GPX Track': 'GPX 트랙',
'GRN Status': 'GRN 상태',
'GRN': 'GRN',
'Gale Wind': '강풍',
'Gap Analysis Map': '차이 분석 맵',
'Gap Analysis Report': '차이 분석 보고서',
'Gap Analysis': '갭 분석',
'Gap Map': '맵 차이',
'Gap Report': '공백 보고서',
'Gateway Settings': '게이트웨이 설정',
'Gateway settings updated': '게이트웨이 설정 갱신',
'Gateway': '게이트웨이',
'Gender': '성별',
'General Comment': '일반적인 의견',
'General Medical/Surgical': '일반 의료/수술',
'General emergency and public safety': '일반적인 긴급 및 공공 안전',
'General information on demographics': '인구통계에 대한 일반적 정보',
'General': '일반',
'Generator': '생성기',
'Geocode': '지오코드',
'Geocoder Selection': '지오코더 선택',
'Geometry Name': 'geometry 이름',
'Geophysical (inc. landslide)': '지구물리 (산사태 포함)',
'Geotechnical Hazards': '지반공학적 위험',
'Geotechnical': '지반공학',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '실행 중인 Python에서 Geraldo 모듈을 사용할 수 없습니다 - PDF 출력을 위해서는 이를 설치해야 합니다!',
'Get incoming recovery requests as RSS feed': '수신 복구 요청을 RSS 피드로 받기',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '이미지에 대한 간단한 설명을 입력하십시오. 예를 들어 사진의 어디에서 무엇을 볼 수 있는지 (선택사항).',
'Give information about where and when you have seen them': '그들을 어디에서 언제 보았는지에 대한 정보를 제공하십시오',
'Global Messaging Settings': '글로벌 전달 설정',
'Go to Request': '요청 이동하십시오',
'Good Condition': '양호한지',
'Good': '양호',
'Goods Received Note': '물품 수령 확인서',
'Government UID': '정부 UID',
'Government building': '정부 건물',
'Government': '정부',
'Grade': '등급',
'Greek': '그리스어',
'Green': '초록색',
'Ground movement, fissures': '지반 이동, 균열',
'Ground movement, settlement, slips': '지반 이동, 침하, 미끄러짐',
'Group Description': '그룹 설명',
'Group Details': '그룹 세부사항',
'Group Member added': '그룹 구성원 추가',
'Group Members': '그룹 구성원',
'Group Memberships': '그룹 멤버쉽',
'Group Name': '그룹 이름',
'Group Title': '그룹 제목',
'Group Type': '그룹 유형',
'Group added': '추가된 그룹',
'Group deleted': '그룹 삭제',
'Group description': '그룹 설명',
'Group updated': '그룹 갱신',
'Group': '그룹',
'Groups removed': '그룹 제거',
'Groups': '그룹',
'Guest': '게스트',
'HR Manager': 'HR 관리자',
'Hail': '우박',
'Hair Color': '머리 색상',
'Hair Length': '십자선 길이',
'Hair Style': '십자선 스타일',
'Has data from this Reference Document been entered into Sahana?': '이 참조 문서의 데이터가 Sahana에 입력되었습니까?',
'Has the Certificate for receipt of the shipment been given to the sender?': '운송물 수령 인증서가 발송자에게 전달되었습니까?',
'Has the GRN (Goods Received Note) been completed?': 'GRN(물품 수령 확인서)이 완료되었습니까?',
'Hazard Pay': '위험 수당',
'Hazardous Material': '위험물',
'Hazardous Road Conditions': '위험한 도로 상태',
'Header Background': '헤더 배경',
'Header background file %s missing!': '헤더 배경 파일 %s이(가) 누락되었습니다!',
'Headquarters': '본사',
'Health care assistance, Rank': '의료 지원, 순위',
'Health center with beds': '병상이 있는 보건소',
'Health center without beds': '병상이 없는 보건소',
'Health center': '보건소',
'Health services status': '보건 서비스 상태',
'Health': '보건',
'Healthcare Worker': '의료 종사자',
'Heat Wave': '폭염',
'Heat and Humidity': '열 및 습도',
'Height (cm)': '높이 (cm)',
'Height (m)': '높이 (m)',
'Height': '높이',
'Help': '도움말',
'Helps to monitor status of hospitals': '병원 상태를 모니터하는 데 도움이 됩니다',
'Helps to report and search for missing persons': '실종자를 보고하고 검색하는 데 도움이 됩니다',
'Here are the solution items related to the problem.': '다음은 문제와 관련된 솔루션 항목입니다.',
'Heritage Listed': '문화유산 등재됨',
'Hierarchy Level 0 Name (i.e. Country)': '계층 구조 레벨 0 이름 (국가)',
'Hierarchy Level 1 Name (e.g. State or Province)': '계층 구조 레벨 1 이름 (예: 도)',
'Hierarchy Level 2 Name (e.g. District or County)': '계층 구조 레벨 2 이름 (예: 지역 또는 국가)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': '계층 구조 레벨 3 이름 (예: 구/군/읍/village)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': '계층 구조 레벨 4 이름 (예: 근방)',
'Hierarchy Level 5 Name': '계층 구조 레벨 5 이름',
'High Water': '최고 사용 표시점',
'High': '높음',
'History': '내력',
'Hit the back button on your browser to try again.': '브라우저의 뒤로 단추를 사용하여 다시 시도하십시오.',
'Holiday Address': '공휴일 주소',
'Home Address': '집 주소',
'Home Country': '자택 국가',
'Home Crime': '범죄 홈',
'Home': '홈',
'Hospital Details': '병원 세부사항',
'Hospital Status Report': '병원 상태 보고서',
'Hospital information added': '병원 정보 추가',
'Hospital information deleted': '병원 정보 삭제',
'Hospital information updated': '병원 정보 갱신',
'Hospital status assessment.': '병원 상태 평가.',
'Hospital': '병원',
'Hospitals': '병원',
'Hot Spot': '핫 스팟',
'Hour': '시간',
'Hours': '시간',
'Household kits received': '가정용 키트 수령',
'Household kits, source': '가정용 키트, 출처',
'How does it work?': '작동 방법?',
'How is this person affected by the disaster? (Select all that apply)': '이 사람은 재해로 어떤 영향을 받았습니까? (해당되는 사항을 모두 선택하십시오)',
'How long will the food last?': '식량이 얼마나 오래 지속됩니까?',
'How many Boys (0-17 yrs) are Dead due to the crisis': '위기로 인해 사망한 소년(0-17세)의 수',
'How many Boys (0-17 yrs) are Injured due to the crisis': '위기로 인해 부상당한 소년(0-17세)의 수',
'How many Boys (0-17 yrs) are Missing due to the crisis': '위기로 인해 실종된 소년(0-17세)의 수',
'How many Girls (0-17 yrs) are Dead due to the crisis': '위기로 인해 사망한 소녀(0-17세)의 수',
'How many Girls (0-17 yrs) are Injured due to the crisis': '위기로 인해 부상당한 소녀(0-17세)의 수',
'How many Girls (0-17 yrs) are Missing due to the crisis': '위기로 인해 실종된 소녀(0-17세)의 수',
'How many Men (18 yrs+) are Dead due to the crisis': '위기로 인해 사망한 남성(18세 이상)의 수',
'How many Men (18 yrs+) are Injured due to the crisis': '위기로 인해 부상당한 남성(18세 이상)의 수',
'How many Men (18 yrs+) are Missing due to the crisis': '위기로 인해 실종된 남성(18세 이상)의 수',
'How many Women (18 yrs+) are Dead due to the crisis': '위기로 인해 사망한 여성(18세 이상)의 수',
'How many Women (18 yrs+) are Injured due to the crisis': '위기로 인해 부상당한 여성(18세 이상)의 수',
'How many Women (18 yrs+) are Missing due to the crisis': '위기로 인해 실종된 여성(18세 이상)의 수',
'How many days will the supplies last?': '물자가 며칠이나 지속됩니까?',
'How many new cases have been admitted to this facility in the past 24h?': '지난 24시간 동안 이 시설에 입원한 새로운 사례는 몇 건입니까?',
'How many of the patients with the disease died in the past 24h at this facility?': '지난 24시간 동안 이 시설에서 해당 질병으로 사망한 환자는 몇 명입니까?',
'How many patients with the disease are currently hospitalized at this facility?': '해당 질병으로 현재 이 시설에 입원 중인 환자는 몇 명입니까?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': '세부사항이 얼마나 표시되는지를 나타냅니다. 확대/축소 레벨이 높으면 세부사항은 많지만 넓은 영역을 볼 수 없습니다. 확대/축소 레벨이 낮으면 넓은 영역을 볼 수 있지만 세부사항 수준은 낮습니다.',
'Human Resource Details': '인적 자원 세부사항',
'Human Resource Management': '인적 자원 관리',
'Human Resource added': '추가된 인적 자원',
'Human Resource removed': '인적 자원 제거',
'Human Resource updated': '인적 자원 갱신',
'Human Resource': '인적 자원',
'Human Resources Management': '인적 자원 관리',
'Hurricane Force Wind': '허리케인 강제 바람',
'Hurricane': '허리케인',
'Hygiene NFIs': '위생 NFI',
'Hygiene kits received': '위생 키트 수령',
'Hygiene kits, source': '위생 키트, 출처',
'Hygiene practice': '위생 실천',
'Hygiene problems': '위생 문제점',
'Hygiene': '위생',
'I am available in the following area(s)': '다음 지역에서 활동 가능합니다',
'ID Tag Number': 'ID 태그 번호',
'ID Tag': 'ID 태그',
'ID type': 'ID 유형',
'Ice Pressure': '얼음 압력',
'Identification Report': '신원 확인 보고서',
'Identification Reports': '신원 확인 보고서',
'Identification Status': '신원 확인 상태',
'Identified as': '식별된',
'Identified by': '식별됩니다',
'Identity Details': '신분 세부사항',
'Identity added': '신분 추가됨',
'Identity deleted': '신분 삭제됨',
'Identity updated': '신분 갱신됨',
'If a ticket was issued then please provide the Ticket ID.': '티켓이 발행된 경우 티켓 ID를 제공하십시오.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': '사용자가 이 도메인의 이메일 주소를 소유하고 있음을 확인하면, 승인자 필드는 추가 승인이 필요한지 여부와 누가 승인해야 하는지를 판별하는 데 사용됩니다.',
'If it is a URL leading to HTML, then this will downloaded.': 'HTML로 연결되는 URL인 경우 이를 다운로드합니다.',
'If neither are defined, then the Default Marker is used.': '둘 다 정의되지 않은 경우 기본 마커가 사용됩니다.',
'If no marker defined then the system default marker is used': '마커가 정의되지 않은 경우 시스템 기본 마커가 사용됩니다',
'If no, specify why': '아니오인 경우 이유를 지정하십시오',
'If none are selected, then all are searched.': '아무 것도 선택되지 않은 경우 모두 검색됩니다.',
'If the location is a geographic area, then state at what level here.': '위치가 지리적 영역인 경우 여기에 어느 레벨인지 명시하십시오.',
'If the request type is "Other", please enter request details here.': '요청 유형이 "기타"인 경우 여기에 요청 세부사항을 입력하십시오.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': '이 필드가 채워진 경우 지정된 도메인을 가진 사용자는 자동으로 이 조직의 직원으로 지정됩니다',
'If this is set to True then mails will be deleted from the server after downloading.': '이 값이 True로 설정된 경우 메일은 다운로드 후 서버에서 삭제됩니다.',
'If this record should be restricted then select which role is required to access the record here.': '이 레코드를 제한해야 하는 경우 여기에서 레코드에 액세스하는 데 필요한 역할을 선택하십시오.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': '이 레코드를 제한해야 하는 경우 여기에서 레코드에 액세스할 수 있는 역할을 선택하십시오.',
'If yes, specify what and by whom': '예인 경우 무엇을 누가 했는지 지정하십시오',
'If yes, which and how': '예인 경우 어느 것을 어떻게 했는지 지정하십시오',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '참조 문서를 입력하지 않으면 이 데이터를 확인할 수 있도록 귀하의 이메일이 표시됩니다.',
'If you know what the Geonames ID of this location is then you can enter it here.': '이 위치의 Geonames ID를 알고 있으면 여기에 입력할 수 있습니다.',
'If you know what the OSM ID of this location is then you can enter it here.': '이 위치의 OSM ID를 알고 있으면 여기에 입력할 수 있습니다.',
'If you need to add a new document then you can click here to attach one.': '새 문서를 추가해야 하는 경우 여기를 눌러 첨부할 수 있습니다.',
'If you want several values, then separate with': '여러 값을 원하면 다음으로 구분하십시오',
'If you would like to help, then please': '돕고 싶으시면 다음을 수행하십시오',
'Illegal Immigrant': '불법 이민자',
'Image Details': '이미지 세부사항',
'Image Tags': '이미지 태그',
'Image Type': '이미지 유형',
'Image Upload': '이미지 업로드',
'Image added': '이미지 추가',
'Image deleted': '이미지 삭제',
'Image updated': '이미지 갱신',
'Image': '이미지',
'Imagery': '이미지',
'Impact Assessments': '영향 평가',
'Impact Details': '영향 세부사항',
'Impact Type Details': '영향 유형 세부사항',
'Impact Type added': '영향 유형 추가',
'Impact Type deleted': '영향 유형 삭제',
'Impact Type updated': '영향 유형 갱신',
'Impact Type': '영향 유형',
'Impact Types': '임팩트 유형',
'Impact added': '영향 추가됩니다',
'Impact deleted': '영향 삭제',
'Impact updated': '갱신된 영향',
'Impacts': '영향',
'Import & Export Data': '데이타 가져오기 및 내보내기',
'Import Data': '데이터 가져오기',
'Import Jobs': '가져오기 작업',
'Import and Export': '가져오기 및 내보내기',
'Import from Ushahidi Instance': 'ushahidi 인스턴스 가져오기',
'Import if Master': '가져올 마스터 경우',
'Import multiple tables as CSV': '가져오기 여러 테이블을 csv 로',
'Import': '가져오기',
'Import/Export': '가져오기/내보내기',
'Important': '중요',
'Importantly where there are no aid services being provided': '지원 서비스가 제공되지 않는 곳에서는 중요하게',
'Importing data from spreadsheets': '스프레드시트에서 데이터 가져오기',
'Improper decontamination': '부적절한 오염 제거',
'Improper handling of dead bodies': '부적절한 시신 취급',
'In Catalogs': '카탈로그에',
'In Inventories': '재고가 있는',
'In Process': '진행 중',
'In Progress': '진행 중',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': '창 레이아웃에서는 맵이 창을 채우도록 최대화되므로 여기에 큰 값을 설정할 필요가 없습니다.',
'Inbound Mail Settings': '인바운드 메일 설정',
'Incident Categories': '인시던트 범주',
'Incident Report Details': '인시던트 세부사항 보고서',
'Incident Report added': '인시던트 보고서 추가됨',
'Incident Report deleted': '인시던트 보고서 삭제됨',
'Incident Report updated': '인시던트 보고서 갱신됨',
'Incident Report': '인시던트 보고서',
'Incident Reporting System': '인시던트 보고 시스템',
'Incident Reporting': '인시던트 보고',
'Incident Reports': '인시던트 보고서',
'Incident': '인시던트',
'Incidents': '인시던트',
'Incoming Shipment canceled': '수신 운송물 취소됨',
'Incoming Shipment updated': '수신 운송물 갱신됨',
'Incoming': '수신',
'Incomplete': '완료되지 않음',
'Individuals': '개인',
'Industrial Crime': '산업 범죄',
'Industrial': '산업',
'Industry Fire': '산업 화재',
'Infectious Disease (Hazardous Material)': '전염병 (위험 물질)',
'Infectious Disease': '전염병',
'Infectious Diseases': '전염병',
'Informal Leader': '비공식 리더',
'Informal camp': '비정규 캠프',
'Information gaps': '정보 격차',
'Infusion catheters available': '사용 가능한 수액 카테터',
'Infusion catheters need per 24h': '24시간당 필요한 수액 카테터',
'Infusion catheters needed per 24h': '24시간당 필요한 수액 카테터',
'Infusions available': '사용 가능한 수액',
'Infusions needed per 24h': '24시간당 필요한 수액',
'Inspected': '검사됨',
'Inspection Date': '검사 날짜',
'Inspection date and time': '검사 날짜 및 시간',
'Inspection time': '검사 시간',
'Inspector ID': '검사원 ID',
'Instant Porridge': '인스턴트 죽',
'Institution': '기관',
'Insufficient vars: Need module, resource, jresource, instance': '변수 부족: 모듈, 자원, jresource, 인스턴스가 필요합니다',
'Insufficient': '충분하지 않음',
'Intake Items': '흡입구 항목',
'Intergovernmental Organization': '정부간 조직',
'Interior walls, partitions': '내부 벽, 칸막이',
'Internal State': '내부 상태',
'International NGO': '국제 NGO',
'International Organization': '국제 조직',
'Interview taking place at': '인터뷰 장소',
'Invalid Query': '유효하지 않은 쿼리',
'Invalid request!': '올바르지 않은 요청!',
'Invalid ticket': '유효하지 않은 티켓',
'Invalid': '올바르지 않음',
'Inventories': '비축량이',
'Inventory Item Details': '재고 품목 세부사항',
'Inventory Item updated': '자원 명세 품목 갱신',
'Inventory Item': '재고 항목',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': '재고 품목에는 소모성 물자와 목적지에서 자산으로 전환될 품목이 모두 포함됩니다.',
'Inventory Items': '재고 항목',
'Inventory Management': '인벤토리 관리',
'Inventory of Effects': '자원 명세 효과',
'Is editing level L%d locations allowed?': '레벨 L%d 위치의 편집이 허용됩니까?',
'Is it safe to collect water?': '물을 수집하는 것이 안전합니까?',
'Is this a strict hierarchy?': '엄격한 계층 구조입니까?',
'Issuing Authority': '발행 권한자',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '이는 활동 중인 장소뿐만 아니라 각 지역에서 제공하는 프로젝트 범위에 대한 정보도 수집합니다.',
'Item Added to Shipment': '운송 항목에 추가됩니다',
'Item Catalog Details': '카탈로그 항목 세부사항',
'Item Categories': '항목 카테고리',
'Item Category Details': '항목 카테고리 세부사항',
'Item Category added': '항목에 카테고리 추가',
'Item Category deleted': '삭제된 항목 카테고리',
'Item Category updated': '갱신된 항목 카테고리',
'Item Category': '항목 대 카테고리',
'Item Details': '품목 세부사항',
'Item Pack Details': '항목 팩 세부사항',
'Item Pack added': '항목 팩 추가',
'Item Pack deleted': '항목 팩 삭제',
'Item Pack updated': '항목 팩이 갱신되었습니다',
'Item Packs': '항목 팩들',
'Item added to Inventory': '항목 명세에 추가됩니다',
'Item added to shipment': '운송 항목에 추가됩니다',
'Item added': '항목 추가됨',
'Item already in Bundle!': '항목이 이미 번들에!',
'Item already in Kit!': '항목이 이미 킷에!',
'Item already in budget!': '항목이 이미 예산!',
'Item deleted': '항목 삭제됨',
'Item removed from Inventory': '항목 명세로부터 제거됩니다',
'Item updated': '항목 갱신',
'Items in Category can be Assets': '카테고리 항목 자산 수',
'Items': '항목',
'Japanese': '일본어',
'Jerry can': '제리캔 (물통)',
'Job Role Catalog': '작업 역할 카탈로그',
'Job Role Details': '작업 역할 세부사항',
'Job Role added': '작업 역할 추가',
'Job Role deleted': '작업 역할 삭제',
'Job Role updated': '작업 역할 갱신',
'Job Role': '업무 역할',
'Job Roles': '직무',
'Job Title': '직위',
'Jobs': '작업',
'Journal Entry Details': '저널 항목 세부사항',
'Journal entry added': '저널 항목 추가',
'Journal entry deleted': '저널 항목 삭제',
'Journal entry updated': '저널 항목이 갱신됩니다',
'Journal': '문서철',
'Key Details': '키 세부사항',
'Key added': '키 추가',
'Key deleted': '키 삭제',
'Key updated': '키 갱신',
'Key': '키',
'Keys': '키',
'Kit Contents': '키트 내용',
'Kit Details': '킷 세부사항',
'Kit Updated': '키트 갱신됨',
'Kit added': '키트 추가됨',
'Kit deleted': '키트 삭제됨',
'Kit updated': '키트 갱신됨',
'Kit': '킷',
'Kits': '킷',
'Known Identities': '알려진 신원',
'Known incidents of violence against women/girls': '여성/소녀에 대한 알려진 폭력 사건',
'Known incidents of violence since disaster': '재해 이후 알려진 폭력 사건',
'Korean': '한국어',
'LICENSE': '라이센스',
'Lack of material': '자재 부족',
'Lack of school uniform': '교복 부족',
'Lack of supplies at school': '학교의 물품 부족',
'Lack of transport to school': '학교까지의 교통수단 부족',
'Lactating women': '수유부',
'Language': '언어',
'Last Name': '성',
'Last known location': '마지막 알려진 위치에서',
'Last synchronization time': '마지막 동기화 시간',
'Last updated by': '마지막 갱신자',
'Last updated on': '마지막 업데이트 날짜',
'Last updated': '최종 업데이트 날짜',
'Latitude & Longitude': '위도 및 경도',
'Latitude is North-South (Up-Down).': '위도의 경우 북쪽-남쪽 (위로-아래로).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '위도는 적도에서 0이며 북반구에서는 양수, 남반구에서는 음수입니다.',
'Latitude of Map Center': '맵 중심의 위도',
'Latitude of far northern end of the region of interest.': '관심 영역의 최북단 위도입니다.',
'Latitude of far southern end of the region of interest.': '관심 영역의 최남단 위도입니다.',
'Latitude should be between': '위도 사이여야 합니다',
'Latitude': '위도',
'Law enforcement, military, homeland and local/private security': '법률 시행, 군사, 국토 및 로컬/개인용 보안',
'Layer Details': '레이어 세부사항',
'Layer added': '레이어 추가됨',
'Layer deleted': '레이어 삭제됨',
'Layer updated': '레이어 갱신됨',
'Layer': '레이어',
'Layers updated': '레이어 갱신됨',
'Layers': '레이어',
'Leader': '리더',
'Legend Format': '범례 형식',
'Length (m)': '길이 (m)',
'Level 1 Assessment Details': '레벨 1 평가 세부사항',
'Level 1 Assessment added': '레벨 1 평가 추가',
'Level 1 Assessment deleted': '레벨 1 평가 삭제',
'Level 1 Assessment updated': '레벨 1 평가 갱신',
'Level 1 Assessments': '평가 레벨 1',
'Level 1': '레벨 1',
'Level 2 Assessment Details': '레벨 2 평가 세부사항',
'Level 2 Assessment added': '레벨 2 평가 추가',
'Level 2 Assessment deleted': '레벨 2 평가 삭제',
'Level 2 Assessment updated': '레벨 2 평가 갱신',
'Level 2 Assessments': '평가 레벨 2',
'Level 2 or detailed engineering evaluation recommended': '레벨 2 상세 엔지니어링 평가 권장',
'Level 2': '레벨 2',
'Level': '레벨',
'Library support not available for OpenID': 'OpenID에 대한 라이브러리 지원을 사용할 수 없습니다',
'LineString': '선스트링',
'List / Add Baseline Types': '기준선 유형 목록/추가',
'List / Add Impact Types': '영향 유형 목록/추가',
'List / Add Services': '서비스 목록/추가',
'List / Add Types': '유형 목록/추가',
'List Activities': '활동 나열',
'List All Assets': '모든 자산',
'List All Catalog Items': '모든 카탈로그 항목',
'List All Commitments': '모든 commitments',
'List All Entries': '목록에서 모든 항목',
'List All Item Categories': '목록에서 모든 항목 카테고리',
'List All Memberships': '모든 멤버쉽',
'List All Received Shipments': '수신된 모든 운송물을',
'List All Records': '모든 레코드',
'List All Requested Items': '요청된 모든 목록 항목',
'List All Requests': '모든 요청',
'List All Sent Shipments': '목록에서 모든 보낸 운송물을',
'List All': '모두 나열',
'List Alternative Items': '목록에서 대체 항목',
'List Assessment Summaries': '목록에서 평가 요약',
'List Assessments': '목록에서 평가',
'List Assets': '자산 나열',
'List Availability': '가용성 목록',
'List Baseline Types': '목록에서 기준선 유형',
'List Baselines': '목록에서 기준선을',
'List Brands': '목록에서 브랜드',
'List Budgets': '예산 목록',
'List Bundles': 'Bundle 나열',
'List Camp Services': '목록에서 자녀를 서비스',
'List Camp Types': '목록 유형 캠프',
'List Camps': '목록 camps',
'List Catalog Items': '카탈로그 항목 목록',
'List Catalogs': '카탈로그 목록',
'List Certificates': '인증서 목록',
'List Certifications': '인증 목록',
'List Checklists': '점검 목록',
'List Cluster Subsectors': '클러스터 subsectors',
'List Clusters': '클러스터 목록',
'List Commitment Items': '목록 항목 확약',
'List Commitments': '목록에서 commitments',
'List Competencies': '목록에서 능력',
'List Competency Ratings': '목록에서 능력 등급',
'List Conflicts': '목록 충돌',
'List Contact Information': '연락처 목록 정보',
'List Contacts': '담당자 나열',
'List Course Certificates': '과정 목록 certicates',
'List Courses': '과정 목록',
'List Credentials': '권한 목록',
'List Current': '현재 목록',
'List Documents': '문서 목록',
'List Donors': '목록 donors',
'List Events': '이벤트 목록',
'List Facilities': '기능 목록',
'List Feature Layers': '계층 목록 기능',
'List Flood Reports': '목록에서 플러드 보고서',
'List Groups': '그룹을 나열합니다.',
'List Groups/View Members': '그룹/보기 멤버',
'List Hospitals': '목록에서 병원',
'List Human Resources': '인적 자원 목록',
'List Identities': '리스트 id',
'List Images': '이미지 목록',
'List Impact Assessments': '목록에서 영향 평가',
'List Impact Types': '목록에서 영향 유형',
'List Impacts': '목록에서 영향',
'List Incident Reports': '인시던트 목록 보고서',
'List Item Categories': '목록 항목 카테고리',
'List Item Packs': '리스트 항목이 팩',
'List Items in Inventory': '목록에서 항목을 명세에',
'List Items': '항목 나열',
'List Job Roles': '작업 역할 목록',
'List Keys': '키 나열',
'List Kits': '킷 목록',
'List Layers': '레이어 목록',
'List Level 1 Assessments': '목록에서 레벨 1 평가',
'List Level 1 assessments': '목록에서 레벨 1 평가',
'List Level 2 Assessments': '목록에서 레벨 2 평가',
'List Level 2 assessments': '목록에서 레벨 2 평가',
'List Locations': '위치 목록',
'List Log Entries': '로그 항목 목록',
'List Map Configurations': '목록에서 맵 구성',
'List Markers': '목록에서 표시문자',
'List Members': '구성원 목록',
'List Memberships': '멤버쉽 리스트',
'List Messages': '메시지 목록',
'List Missing Persons': '목록에서 누락된 사람이',
'List Missions': '목록에서 임무',
'List Need Types': '목록에서 필요한 유형을',
'List Needs': '목록에서 합니다',
'List Offices': '목록 offices',
'List Organizations': '조직 목록',
'List Peers': '피어 목록',
'List Personal Effects': '목록에서 개인 효과',
'List Persons': '사용자 목록',
'List Photos': '목록에서 사진',
'List Population Statistics': '목록에서 인구 통계',
'List Positions': '위치 목록',
'List Problems': '목록 문제점',
'List Projections': '프로젝션 목록',
'List Projects': '프로젝트 목록',
'List Rapid Assessments': '목록에서 신속한 평가',
'List Received Items': '목록에서 받은 항목',
'List Received Shipments': '목록에서 받은 운송물을',
'List Records': '레코드 목록',
'List Registrations': '등록 목록',
'List Reports': '목록 보고서',
'List Request Items': '요청 목록 항목',
'List Requests': '요청 목록',
'List Resources': '자원 표시',
'List Rivers': '목록에서 강',
'List Roles': '역할 리스트',
'List Rooms': '회의실 목록',
'List Scenarios': '시나리오 목록',
'List Sections': '목록 섹션',
'List Sectors': '목록에서 sectors',
'List Sent Items': '전송된 항목 목록',
'List Sent Shipments': '목록에서 보낸 운송물을',
'List Service Profiles': '목록에서 서비스 프로파일',
'List Settings': '목록 설정',
'List Shelter Services': 'shelter 서비스 목록',
'List Shelter Types': 'shelter 유형 목록',
'List Shelters': '목록 shelters',
'List Skill Equivalences': '목록에서 기술 equivalences',
'List Skill Provisions': '목록에서 기술 조항',
'List Skill Types': '목록 항목 유형',
'List Skills': '기술 목록',
'List Solutions': '솔루션 목록',
'List Staff Types': '목록에서 직원 유형',
'List Status': '목록 상태',
'List Subscriptions': '등록 목록',
'List Subsectors': '목록 subsectors',
'List Support Requests': '목록 지원 요청',
'List Survey Answers': '목록에서 서베이 응답을',
'List Survey Questions': '목록에서 서베이 질문',
'List Survey Series': '목록에서 서베이 시리즈',
'List Survey Templates': '목록에서 서베이 템플리트',
'List Tasks': '태스크 나열',
'List Teams': '팀 목록',
'List Themes': '목록 테마',
'List Tickets': '티켓 목록',
'List Tracks': '목록 트랙',
'List Trainings': '목록 trainings',
'List Units': '장치 목록',
'List Users': '사용자 나열',
'List Warehouses': '목록 웨어하우스를',
'List all': '모두 나열',
'List available Scenarios': '사용 가능한 시나리오',
'List of Items': '항목 목록',
'List of Missing Persons': '목록에서 누락된 개인',
'List of Peers': '피어 목록',
'List of Reports': '보고서 목록',
'List of Requests': '요청의 목록',
'List of Spreadsheets uploaded': '목록에서 스프레드시트를 업로드할',
'List of Spreadsheets': '목록 스프레드시트를',
'List of Volunteers for this skill set': '이 기술 volunteers 목록 설정',
'List of Volunteers': '목록 volunteers 의',
'List of addresses': '주소 목록',
'List unidentified': '목록에서 비식별',
'List/Add': '목록/추가',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '누가 어떤 & where\\ " 수행 중인 나열합니다 \\ ". 릴리프 기관 자신의 활동을 조정하고',
'Live Help': '실시간 도움말',
'Load Cleaned Data into Database': '로드 정리된 데이터로 데이터베이스',
'Load Raw File into Grid': '로드 격자로 원시 파일',
'Loading': '로드 중',
'Local Name': '로컬 이름',
'Local Names': '로컬명',
'Location 1': '위치 1',
'Location 2': '위치 2',
'Location Details': '위치 세부사항',
'Location Hierarchy Level 0 Name': '위치 계층 구조 레벨 0 이름',
'Location Hierarchy Level 1 Name': '위치 계층 구조 레벨 1 이름',
'Location Hierarchy Level 2 Name': '위치 계층 구조 레벨 2 이름',
'Location Hierarchy Level 3 Name': '위치 계층 구조 레벨 3 이름',
'Location Hierarchy Level 4 Name': '위치 계층 구조 레벨 4 이름',
'Location Hierarchy Level 5 Name': '위치 계층 구조 레벨 5 이름',
'Location added': '위치 추가',
'Location deleted': '삭제된 위치',
'Location group cannot be a parent.': '위치 그룹은 상위가 될 수 없습니다.',
'Location group cannot have a parent.': '위치 그룹은 상위를 가질 수 없습니다.',
'Location groups can be used in the Regions menu.': '위치 그룹은 Regions 메뉴에서 사용할 수 있습니다.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': '위치 그룹은 맵에 표시되는 내용과 검색 결과를 그룹 내 위치가 포함하는 엔티티만으로 필터링하는 데 사용할 수 있습니다.',
'Location updated': '위치 갱신',
'Location': '위치',
'Location:': '위치:',
'Locations of this level need to have a parent of level': '이 레벨의 위치는 다음 레벨의 상위가 있어야 합니다',
'Locations': '위치',
'Lockdown': '잠금',
'Log Entry Details': '로그 항목 세부사항',
'Log entry added': '로그 항목 추가',
'Log entry deleted': '로그 항목이 삭제됩니다',
'Log entry updated': '로그 항목이 갱신됩니다',
'Log': '로그',
'Login': '로그인',
'Logistics Management System': '물류 관리 시스템',
'Logistics': '물류',
'Logo file %s missing!': '로고 파일 %s이(가) 누락되었습니다!',
'Logo': '로고',
'Logout': '로그아웃',
'Long Text': '긴 텍스트',
'Longitude is West - East (sideways).': '경도는 서-동 방향입니다 (옆으로).',
'Longitude is West-East (sideways).': '경도는 서-동 방향입니다 (옆으로).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '경도는 본초 자오선(그리니치 표준시)에서 0이며 유럽과 아시아를 가로질러 동쪽으로 양수입니다. 대서양과 아메리카를 가로질러 서쪽으로는 음수입니다.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '경도는 본초 자오선(영국 그리니치를 지나는)에서 0이며 유럽과 아시아를 가로질러 동쪽으로 양수입니다. 대서양과 아메리카를 가로질러 서쪽으로는 음수입니다.',
'Longitude of Map Center': '맵 중심의 경도',
'Longitude of far eastern end of the region of interest.': '관심 영역의 최동단 경도입니다.',
'Longitude of far western end of the region of interest.': '관심 영역의 최서단 경도입니다.',
'Longitude should be between': '경도 사이여야 합니다',
'Longitude': '경도',
'Lost Password': '잊어버린 암호',
'Lost': '분실됨',
'Low': '낮음',
'Magnetic Storm': '자기 폭풍',
'Major Damage': '주요 손상',
'Major expenses': '주요 비용',
'Major outward damage': '주요 외부 손상',
'Make Commitment': '확약하기',
'Make New Commitment': '새로 확약하기',
'Make Request': '요청하기',
'Make preparations per the <instruction>': '<instruction>에 따라 준비하십시오',
'Manage Relief Item Catalogue': '관리 릴리프 항목 카탈로그',
'Manage Users & Roles': '사용자 및 역할',
'Manage Warehouses/Sites': '관리 웨어하우스를/사이트',
'Manage Your Facilities': '관리 기능',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '물자, 자산, 직원 또는 기타 자원에 대한 요청을 관리합니다. 물자가 요청된 경우 재고와 대조합니다.',
'Manage requests of hospitals for assistance.': '병원의 지원 요청을 관리합니다.',
'Manage volunteers by capturing their skills, availability and allocation': '기술, 가용성 및 배치를 기록하여 자원봉사자를 관리합니다',
'Managing Office': '사무실 관리',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '필수. GeoServer에서는 레이어 이름입니다. WFS getCapabilities 내에서는 콜론(:) 뒤의 FeatureType 이름 부분입니다.',
'Mandatory. The URL to access the service.': '필수. 서비스에 액세스하기 위한 URL입니다.',
'Manual Synchronization': '수동 동기화',
'Manual': '수동',
'Many': '다수',
'Map Center Latitude': '맵 center 위도',
'Map Center Longitude': '지도 중심 경도',
'Map Configuration Details': '지도 구성 세부사항',
'Map Configuration added': '지도 구성이 추가되었습니다',
'Map Configuration deleted': '지도 구성이 삭제되었습니다',
'Map Configuration removed': '지도 구성이 삭제되었습니다',
'Map Configuration updated': '맵 구성 갱신',
'Map Configuration': '지도 구성',
'Map Configurations': '지도 구성',
'Map Height': '맵 높이',
'Map Service Catalog': '맵 서비스 카탈로그',
'Map Settings': '맵 설정',
'Map Viewing Client': '맵핑 보기 클라이언트',
'Map Width': '맵 너비',
'Map Zoom': '맵 확대',
'Map of Hospitals': '병원 지도',
'Map': '지도',
'Marine Security': '해상 보안',
'Marital Status': '결혼 여부',
'Marker Details': '마커 세부사항',
'Marker added': '마커 추가',
'Marker deleted': '마커 삭제',
'Marker updated': '마커 갱신',
'Marker': '마커',
'Markers': '마커',
'Master Message Log to process incoming reports & requests': '수신 보고서 및 요청을 처리하기 위한 마스터 메시지 로그',
'Master Message Log': '마스터 메시지 로그',
'Match Percentage': '일치 비율',
'Match Requests': '일치하는 요청',
'Match percentage indicates the % match between these two records': '일치하는 백분율 이 두 레코드 사이의% 일치 표시합니다',
'Match?': '일치 여부',
'Matching Catalog Items': '일치하는 카탈로그 항목',
'Matching Items': '일치하는 항목',
'Matching Records': '대응 레코드를',
'Maximum Location Latitude': '최대 위치는 위도',
'Maximum Location Longitude': '최대 위치 경도',
'Medical and public health': '의료 및 공중 보건',
'Medium': '중간',
'Megabytes per Month': 'mb 당 월',
'Member removed from Group': '멤버쉽 삭제',
'Members': '구성원',
'Membership Details': '멤버쉽 세부사항',
'Membership updated': '멤버쉽 갱신',
'Membership': '멤버쉽',
'Memberships': '멤버십',
'Message Details': '메시지 세부사항',
'Message Variable': '메시지 변수',
'Message added': '메시지 추가',
'Message deleted': '메시지를 삭제했습니다.',
'Message updated': '메시지 갱신됨',
'Message variable': '메시지 변수',
'Messaging settings updated': '메시징 설정 갱신',
'Messaging': '메시지',
'Meteorological (inc. flood)': 'meteorological (inc. 홍수)',
'Method used': '사용된 방법',
'Middle Name': '중간 이름',
'Migrants or ethnic minorities': '이주민 또는 소수 민족',
'Military': '군대',
'Minimum Location Latitude': '최소 위치는 위도',
'Minimum Location Longitude': '최소 위치의 경도',
'Minimum shift time is 6 hours': '최소 근무 시간은 6시간입니다',
'Minor Damage': '부 손상',
'Minor/None': '부/없음',
'Minorities participating in coping activities': '대처 활동에 참여하는 소수 민족',
'Minutes must be a number between 0 and 60': '분은 0에서 60 사이의 숫자여야 합니다',
'Minutes per Month': '월당 분',
'Minutes should be a number greater than 0 and less than 60': '분은 0보다 크고 60보다 작은 숫자여야 합니다',
'Miscellaneous': '기타',
'Missing Person Details': '누락된 사용자 세부사항',
'Missing Person Registry': '누락된 사용자 레지스트리',
'Missing Person': '누락된 사용자',
'Missing Persons Registry': '사용자 레지스트리 누락',
'Missing Persons Report': '개인 보고서 누락',
'Missing Persons': '누락된 사람이',
'Missing Report': '누락된 보고서',
'Missing Senior Citizen': '실종된 노인',
'Missing Vulnerable Person': '실종된 취약 계층',
'Missing': '누락',
'Mission Details': '세부사항 임무',
'Mission Record': '레코드 임무',
'Mission added': '추가된 임무',
'Mission deleted': '삭제된 임무',
'Mission updated': '갱신된 mission',
'Missions': '임무',
'Mobile Basic Assessment': '모바일 기본 평가',
'Mobile Phone': '휴대폰',
'Mobile': '모바일',
'Mode': '모드',
'Model/Type': '유형/모델',
'Modem Settings': '모뎀 설정(M)',
'Modem settings updated': '모뎀 설정 갱신',
'Modem': '모뎀',
'Moderate': '중재',
'Moderator': '미팅장',
'Modify Information on groups and individuals': '그룹 및 개인 정보를 수정',
'Modifying data in spreadsheet before importing it to the database': '데이터 스프레드시트에, 데이터베이스에 반입하기 전에 수정',
'Module provides access to information on current Flood Levels.': '모듈 현재 홍수 정보에 대한 액세스를 제공합니다.',
'Module': '모듈',
'Monday': '월요일',
'Monthly Cost': '매월 비용',
'Monthly Salary': '월별 급여',
'Months': '월',
'Morgue Status': '영안실 상태',
'Morgue Units Available': '사용 가능한 영안실 구역',
'Motorcycle': '오토바이',
'Multiple Matches': '다중 일치사항',
'Must a location have a parent location?': '위치에 상위 위치가 있어야 합니까?',
'My Current function': '내 현재 함수',
'My Tasks': '나의 작업',
'NZSEE Level 1': 'nzsee 레벨 1',
'NZSEE Level 2': 'nzsee 레벨 2',
'Name and/or ID': '이름 및/또는 ID',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': '헤더의 배경에 사용되며 정적 공간에 저장된 파일(및 선택적 종속 경로)명',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '왼쪽 상단 이미지에 사용되며 static에 저장된 파일(및 선택적 하위 경로)명.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': '바닥글에 사용되며 views에 저장된 파일(및 선택적 하위 경로)명.',
'Name of the person in local language and script (optional).': '현지 언어 및 문자로 된 개인의 이름 (선택사항).',
'Name, Org and/or ID': '이름, 조직 및/또는 id',
'Names can be added in multiple languages': '이름이 여러 언어로 추가할 수 있습니다',
'National ID Card': '국가 신분증',
'National NGO': '국내 NGO',
'National': '국가',
'Nationality of the person.': '개인의 국적.',
'Nationality': '국적',
'Nautical Accident': '해상 사고',
'Nautical Hijacking': '해상 납치',
'Need Type Details': '필요 유형 세부사항',
'Need Type added': '필요 유형 추가됨',
'Need Type deleted': '필요 유형 삭제됨',
'Need Type updated': '필요 유형 갱신됨',
'Need Type': '필요 유형',
'Need Types': '필요 유형',
'Need added': '필요 사항 추가됨',
'Need deleted': '필요 사항 삭제됨',
'Need to be logged-in to be able to submit assessments': '평가를 제출하려면 로그인해야 합니다',
'Need to configure Twitter Authentication': 'twitter 인증을 구성해야 합니다',
'Need to specify a Budget!': '예산 지정해야 합니다!',
'Need to specify a Kit!': '킷을 지정해야 합니다!',
'Need to specify a Resource!': '자원을 지정해야 합니다!',
'Need to specify a bundle!': '번들을 지정해야 합니다!',
'Need to specify a group!': '그룹을 지정해야 합니다!',
'Need to specify a location to search for.': '검색할 위치를 지정해야 합니다.',
'Need to specify a role!': '역할을 지정해야 합니다!',
'Need to specify a table!': '테이블을 지정해야 합니다!',
'Need to specify a user!': '사용자가 지정해야 합니다!',
'Need updated': '필요 사항 갱신됨',
'Needs Details': '필요 사항 세부사항',
'Needs Maintenance': '유지보수 필요',
'Needs to reduce vulnerability to violence': '폭력에 대한 취약성을 줄이기 위한 필요 사항',
'Needs': '요구사항',
'Negative Flow Isolation': '음수 플로우 분리',
'Neighborhood': '인접',
'Neighbouring building hazard': '인접 건물 위험',
'Network': '네트워크',
'New Assessment reported from': '다음 위치에서 새 평가가 보고됨',
'New Certificate': '새 인증서',
'New Checklist': '새 체크리스트',
'New Entry': '새 항목',
'New Event': '새 이벤트',
'New Item Category': '새 항목 카테고리',
'New Job Role': '새 작업 역할',
'New Location Group': '새 위치 그룹',
'New Location': '새 위치',
'New Peer': '새 피어',
'New Record': '새 레코드',
'New Request': '새 요청',
'New Scenario': '새 시나리오',
'New Skill': '새 기술',
'New Solution Choice': '새 솔루션 선택',
'New Staff Member': '새 스태프 구성원',
'New Support Request': '새 지원 요청',
'New Synchronization Peer': '새 동기화 피어',
'New Team': '신규 팀',
'New Training Course': '새 교육 과정',
'New Volunteer': '새 지원자',
'New cases in the past 24h': '지난 24시간 동안의 새로운 사례',
'New': '신규',
'News': '뉴스',
'No Activities Found': '활동이 없음',
'No Alternative Items currently registered': '대체 현재 등록된',
'No Assessment Summaries currently registered': 'no 현재 등록된 평가 요약',
'No Assessments currently registered': '현재 등록된 평가',
'No Assets currently registered in this event': 'no 자산을 현재 이 이벤트에 등록되어',
'No Assets currently registered in this scenario': 'no 자산을 현재 이 시나리오에서 등록된',
'No Assets currently registered': 'no 현재 등록된 자산',
'No Baseline Types currently registered': '기준선 없음 현재 등록된 유형',
'No Baselines currently registered': '현재 등록된 기준선',
'No Brands currently registered': 'no 현재 등록된 브랜드',
'No Budgets currently registered': '현재 등록된 예산',
'No Bundles currently registered': 'no 현재 등록된 번들',
'No Camp Services currently registered': 'no 자녀를 현재 등록된 서비스',
'No Camp Types currently registered': 'no 자녀를 현재 등록된 유형',
'No Camps currently registered': 'camps 현재 등록된 없음',
'No Catalog Items currently registered': '카탈로그 항목 현재 등록된',
'No Catalogs currently registered': '현재 등록된 catalogs',
'No Checklist available': '사용 가능한 체크리스트가 없습니다',
'No Cluster Subsectors currently registered': 'subsectors 현재 등록된 클러스터',
'No Clusters currently registered': 'no 클러스터에 현재 등록되어',
'No Commitment Items currently registered': '확약 현재 등록된',
'No Credentials currently set': '현재 권한 세트',
'No Details currently registered': '현재 등록된 세부사항이 없습니다',
'No Documents found': '문서를 찾을 수 없습니다',
'No Donors currently registered': '현재 등록된 기증자가 없습니다',
'No Events currently registered': '현재 등록된 이벤트를',
'No Facilities currently registered in this event': '어떤 기능을 현재 이 이벤트에 등록되어',
'No Facilities currently registered in this scenario': '이 시나리오에 현재 등록된 시설물들이 없습니다',
'No Feature Layers currently defined': '기능 계층 현재 정의된',
'No Flood Reports currently registered': '홍수 없음 현재 등록된 보고서',
'No Groups currently defined': '현재 정의된 그룹이 없음',
'No Groups currently registered': '현재 등록된 그룹이 없음',
'No Hospitals currently registered': 'no 현재 등록된 병원',
'No Human Resources currently registered in this event': '이 이벤트에 현재 등록된 인적 자원이 없습니다',
'No Human Resources currently registered in this scenario': '이 시나리오에 현재 등록된 인적자원이 없습니다',
'No Identification Report Available': '사용 가능한 신원 확인 보고서가 없습니다',
'No Identities currently registered': 'no 를 현재 등록된',
'No Image': '이미지 없음',
'No Images currently registered': '이미지가 현재 등록된',
'No Impact Types currently registered': '영향을 현재 등록된 유형',
'No Impacts currently registered': 'no 현재 등록된 영향주기',
'No Incident Reports currently registered': 'no 인시던트 현재 등록된 보고서',
'No Incoming Shipments': '들어오는 운송물을',
'No Item Categories currently registered': '항목이 현재 등록된 범주',
'No Item Packs currently registered': '항목 팩 현재 등록된',
'No Items currently registered in this Inventory': '현재 목록에 등록된 물품이 없습니다.',
'No Items currently registered': '현재 등록된 항목이 없음',
'No Keys currently defined': '키가 현재 정의된',
'No Kits currently registered': '현재 등록된 상품',
'No Level 1 Assessments currently registered': '레벨 1 현재 등록된 평가',
'No Level 2 Assessments currently registered': '레벨 2 현재 등록된 평가',
'No Locations currently available': '현재 사용 가능한 위치가 없습니다',
'No Locations currently registered': '현재 등록된 위치가 없습니다',
'No Map Configurations currently defined': '현재 계획 구성이 정의되지 않았습니다',
'No Map Configurations currently registered in this event': '현재 이 이벤트에 등록된 계획 구성이 없습니다',
'No Map Configurations currently registered in this scenario': '현재 이 시나리오에 등록된 계획 구성이 없습니다',
'No Markers currently available': '표시문자 없음 현재 사용',
'No Match': '일치 없음',
'No Matching Catalog Items': '일치하는 카탈로그 항목',
'No Matching Items': '일치하는 항목 없음',
'No Matching Records': '일치하는 레코드가 없습니다.',
'No Members currently registered': '멤버가 현재 등록된',
'No Memberships currently defined': 'no 멤버쉽을 현재 정의된',
'No Messages currently in Outbox': '메시지가 현재 편지함에',
'No Need Types currently registered': '어떤 유형의 현재 등록된 합니다',
'No Needs currently registered': '현재 등록되어야 합니다',
'No Offices currently registered': 'no offices 현재 등록된',
'No Offices found!': 'no offices 찾을 수 없습니다!',
'No Organizations currently registered': '현재 등록된 조직이 없습니다',
'No People currently registered in this camp': '현재 등록된 사용자 이 캠프 에',
'No People currently registered in this shelter': '현재 등록된 사용자 shelter 이 있는',
'No Persons currently registered': '어떤 사용자가 현재 등록되어',
'No Persons currently reported missing': '어떤 사람이 현재 누락 보고된',
'No Persons found': '어떤 사람이 없음',
'No Photos found': '사진 없음 없음',
'No Picture': '그림이 없습니다.',
'No Population Statistics currently registered': 'no 인구 통계 현재 등록된',
'No Presence Log Entries currently registered': 'no presence 로그 항목이 현재 등록된',
'No Problems currently defined': '현재 정의된 문제가',
'No Projections currently defined': 'no projections 현재 정의된',
'No Projects currently registered': '프로젝트가 현재 등록된',
'No Rapid Assessments currently registered': 'no rapid 현재 등록된 평가',
'No Received Items currently registered': 'no 받은 현재 등록된',
'No Received Shipments': 'no 받은 운송물을',
'No Records currently available': '현재 사용 가능한 레코드가 없습니다',
'No Request Items currently registered': '요청이 현재 등록된',
'No Requests': '요청 없음',
'No Rivers currently registered': 'no 현재 등록된 강',
'No Roles currently defined': '현재 정의된 역할 없음',
'No Rooms currently registered': 'no 미팅룸의 현재 등록된',
'No Scenarios currently registered': '어떤 시나리오의 현재 등록된',
'No Sections currently registered': '현재 등록된 섹션이 없습니다',
'No Sectors currently registered': 'no 섹터를 현재 등록된',
'No Sent Items currently registered': '전송된 항목 없음 현재 등록된',
'No Sent Shipments': 'no 보낸 운송물을',
'No Settings currently defined': '현재 정의된 설정이',
'No Shelter Services currently registered': 'no shelter 현재 등록된 서비스',
'No Shelter Types currently registered': 'no shelter 현재 등록된 유형',
'No Shelters currently registered': '현재 등록된 shelters',
'No Solutions currently defined': 'no 현재 정의된 솔루션',
'No Staff Types currently registered': 'no staff 현재 등록된 유형',
'No Subscription available': '사용가능한 받아보기가 없습니다',
'No Subsectors currently registered': 'subsectors 현재 등록된 없음',
'No Support Requests currently registered': '현재 등록된 지원 요청이 없습니다',
'No Survey Answers currently entered.': '현재 입력된 조사 응답합니다.',
'No Survey Questions currently registered': '현재 등록된 설문용 질문이 없습니다',
'No Survey Series currently registered': '현재 등록된 설문 시리즈가 없습니다',
'No Survey Template currently registered': '조사 템플리트 현재 등록된',
'No Tasks with Location Data': '태스크가 데이터 위치',
'No Teams currently registered': '현재 등록된 팀이',
'No Themes currently defined': 'no 현재 정의된 주제',
'No Tickets currently registered': '현재 등록된 티켓',
'No Tracks currently available': 'no 추적합니다 현재 사용',
'No Users currently registered': '사용자가 현재 등록된',
'No Volunteers currently registered': 'volunteers 현재 등록된 없음',
'No Warehouses currently registered': 'no 웨어하우스를 현재 등록된',
'No access at all': '액세스 권한 전혀 없음',
'No access to this record!': '이 레코드에 대한 액세스 권한이 없습니다!',
'No action recommended': '권장 조치 없음',
'No conflicts logged': '충돌이 logged',
'No contact information available': '사용 가능한 연락처 정보가 없습니다',
'No contacts currently registered': '현재 등록된 연락처가 없습니다',
'No data in this table - cannot create PDF!': '이 테이블에 데이터가 없습니다 - PDF를 작성할 수 없습니다!',
'No databases in this application': '데이터베이스가 이 응용프로그램에',
'No dead body reports available': '사망자 보고서 사용불가',
'No entries found': '항목을 찾을 수 없습니다',
'No entries matching the query': '항목이 일치하는 조회',
'No entry available': '사용 가능한 항목 없음',
'No location known for this person': '위치가 알려진 이 사람은',
'No locations found for members of this team': '위치가 이 팀 구성원의 수',
'No log entries matching the query': '로그 항목이 일치하는 조회',
'No messages in the system': '메시지가 시스템에서',
'No peers currently registered': 'no 피어의 현재 등록된',
'No pending registrations found': '지연 등록을 찾을 수 없음',
'No pending registrations matching the query': '지연 등록을 조회와 일치하는',
'No person record found for current user.': '어떤 사용자가 현재 사용자에 대한 레코드를 찾을 수 없습니다.',
'No problem group defined yet': '문제점 그룹 아직 정의되지',
'No records matching the query': '조회와 일치하는 레코드가',
'No reports available.': '사용 가능한 보고서가 없습니다.',
'No reports currently available': '현재 사용 가능한 보고서',
'No requests found': '찾은 요청이 없습니다.',
'No resources currently reported': '자원이 현재 보고됩니다',
'No service profile available': '사용 가능한 서비스 프로파일이 없습니다',
'No skills currently set': '현재 설정된 기술이 없습니다',
'No staff or volunteers currently registered': '현재 등록된 직원 또는 자원봉사자가 없습니다',
'No status information available': '사용 가능한 상태 정보가 없습니다',
'No synchronization': '동기화 안함',
'No tasks currently registered': '현재 등록된 타스크 없음',
'No template found!': '템플리트!',
'No units currently registered': '현재 등록된 장치가 없음',
'No volunteer availability registered': 'no 자발적으로 가용성 등록한',
'Non-structural Hazards': '비구조적 위험',
'None (no such record)': '없음 (해당 레코드 없음)',
'None': '없음',
'Normal': '정상',
'Not Applicable': '적용할 수 없음',
'Not Authorised!': '권한이 없습니다!',
'Not Possible': '가능하지 않음',
'Not Set': '설정 안 함',
'Not Authorized': '권한이 없습니다',
'Not installed or incorrectly configured.': '설치되지 않았거나 잘못 구성되었습니다.',
'Not yet a Member of any Group': '아직 어떤 그룹의 구성원도 아닙니다',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': '이 목록에는 활동 중인 자원봉사자만 표시됩니다. 시스템에 등록된 모든 사람을 보려면 대신 이 화면에서 검색하십시오',
'Notice to Airmen': '항공고시보 (NOTAM)',
'Number of Columns': '열 수',
'Number of Patients': '환자 수',
'Number of Rows': '행 수',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': '향후 24시간 내에 이 병동에서 사용 가능해질 것으로 예상되는 해당 유형의 추가 병상 수.',
'Number of alternative places for studying': '공부할 수 있는 대체 장소의 수',
'Number of available/vacant beds of that type in this unit at the time of reporting.': '보고 시점에 이 병동에서 사용 가능한/비어 있는 해당 유형의 병상 수.',
'Number of deaths during the past 24 hours.': '지난 24시간 동안의 사망자 수.',
'Number of discharged patients during the past 24 hours.': '지난 24시간 동안 퇴원한 환자 수.',
'Number of doctors': '의사 수',
'Number of in-patients at the time of reporting.': '보고 시점의 입원 환자 수.',
'Number of newly admitted patients during the past 24 hours.': '지난 24시간 동안 새로 입원한 환자 수.',
'Number of non-medical staff': '비의료 인력 수',
'Number of nurses': '간호사 수',
'Number of private schools': '사립 학교 수',
'Number of public schools': '공립 학교 수',
'Number of religious schools': '종교 학교 수',
'Number of residential units not habitable': '거주 불가능한 주거 단위 수',
'Number of residential units': '주거 단위 수',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '이 병원의 비어 있는/사용 가능한 병상 수. 일일 보고서에서 자동으로 갱신됩니다.',
'Number of vacant/available units to which victims can be transported immediately.': '피해자를 즉시 이송할 수 있는 비어 있는/사용 가능한 병동 수.',
'Number or Label on the identification tag this person is wearing (if any).': '이 사람이 착용하고 있는 식별 태그의 번호 또는 레이블 (있는 경우).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': '발견 장소를 표시하는 데 사용된 번호 또는 코드 (예: 깃발 코드, 격자 좌표, 현장 참조 번호 등) (있는 경우)',
'Number': '번호',
'Number/Percentage of affected population that is Female & Aged 0-5': '피해 인구 중 여성, 0-5세의 수/백분율',
'Number/Percentage of affected population that is Female & Aged 13-17': '피해 인구 중 여성, 13-17세의 수/백분율',
'Number/Percentage of affected population that is Female & Aged 18-25': '피해 인구 중 여성, 18-25세의 수/백분율',
'Number/Percentage of affected population that is Female & Aged 26-60': '피해 인구 중 여성, 26-60세의 수/백분율',
'Number/Percentage of affected population that is Female & Aged 6-12': '피해 인구 중 여성, 6-12세의 수/백분율',
'Number/Percentage of affected population that is Female & Aged 61+': '피해 인구 중 여성, 61세 이상의 수/백분율',
'Number/Percentage of affected population that is Male & Aged 0-5': '피해 인구 중 남성, 0-5세의 수/백분율',
'Number/Percentage of affected population that is Male & Aged 13-17': '피해 인구 중 남성, 13-17세의 수/백분율',
'Number/Percentage of affected population that is Male & Aged 18-25': '피해 인구 중 남성, 18-25세의 수/백분율',
'Number/Percentage of affected population that is Male & Aged 26-60': '피해 인구 중 남성, 26-60세의 수/백분율',
'Number/Percentage of affected population that is Male & Aged 6-12': '피해 인구 중 남성, 6-12세의 수/백분율',
'Number/Percentage of affected population that is Male & Aged 61+': '피해 인구 중 남성, 61세 이상의 수/백분율',
'Nursery Beds': '신생아 병상',
'Nutrition problems': '영양 문제',
'OK': '확인',
'OR Reason': '또는 이유',
'OR Status Reason': '상태 이유',
'OR Status': '또는 상태',
'Observer': '관찰자',
'Obsolete': '사용되지 않음',
'Office Address': '사무실 주소',
'Office Details': 'office 세부사항',
'Office Phone': '사무실 전화번호',
'Office added': 'office 추가',
'Office deleted': 'office 삭제됨',
'Office updated': '사무실 갱신된',
'Office': '사무실',
'Offices & Warehouses': '사무실 및 창고',
'Offline Sync (from USB/File Backup)': '오프라인 동기화 (usb/백업에서)',
'Offline Sync': '오프라인 동기화',
'Older people as primary caregivers of children': '아동의 주 보호자인 노인',
'Older people in care homes': '요양원에 있는 노인',
'Older people participating in coping activities': '대처 활동에 참여하는 노인',
'Older person (>60 yrs)': '노인 (60세 초과)',
'On by default? (only applicable to Overlays)': '기본적으로 켜짐? (오버레이에만 적용됨)',
'On by default?': '기본적으로 켜짐?',
'One Time Cost': '일회 비용',
'One time cost': '일회 비용',
'One-time costs': '일회 비용',
'One-time': '일회성',
'Oops! something went wrong on our side.': '이런! 저희 쪽에서 문제가 발생했습니다.',
'Opacity (1 for opaque, 0 for fully-transparent)': '불투명도 (1은 불투명, 0은 완전 투명)',
'Open area': '열린 영역',
'Open recent': '최신 문서 열기',
'Open': '열기',
'Operating Rooms': '수술실',
'Optional link to an Incident which this Assessment was triggered by.': '이 평가를 촉발한 인시던트에 대한 선택적 링크.',
'Optional': '선택적',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': '선택사항입니다. 속성 값에 따라 기능의 스타일을 지정하려면 여기에서 사용할 속성을 선택하십시오.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '선택사항입니다. GeoServer에서는 작업공간 네임스페이스 URI입니다 (이름이 아님!). WFS getCapabilities 내에서는 콜론(:) 앞의 FeatureType 이름 부분입니다.',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': '선택사항입니다. 팝업에 넣을 이미지 파일의 URL을 내용으로 가지는 요소의 이름입니다.',
'Optional. The name of an element whose contents should be put into Popups.': '선택사항입니다. 팝업에 넣을 내용을 가지는 요소의 이름입니다.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': '선택사항입니다. 스키마의 이름입니다. Geoserver에서는 http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name 형식입니다.',
'Options': '옵션',
'Organization Details': '조직 세부사항',
'Organization Registry': '조직 레지스트리',
'Organization added': '조직 추가',
'Organization deleted': '조직 삭제',
'Organization updated': '조직 갱신',
'Organization': '조직',
'Organizations': '조직',
'Origin of the separated children': '분리된 아동의 출신지',
'Other (describe)': '기타(설명)',
'Other (specify)': '기타 (자세히 기술)',
'Other Evidence': '다른 증거를',
'Other Faucet/Piped Water': '다른 faucet/물 파이프',
'Other Isolation': '다른 분리',
'Other Name': '기타 이름',
'Other activities of boys 13-17yrs before disaster': '재해 전 13-17세 소년의 기타 활동',
'Other activities of boys 13-17yrs': '13-17세 소년의 기타 활동',
'Other activities of boys <12yrs before disaster': '재해 전 12세 미만 소년의 기타 활동',
'Other activities of boys <12yrs': '12세 미만 소년의 기타 활동',
'Other activities of girls 13-17yrs before disaster': '재해 전 13-17세 소녀의 기타 활동',
'Other activities of girls 13-17yrs': '13-17세 소녀의 기타 활동',
'Other activities of girls<12yrs before disaster': '재해 전 12세 미만 소녀의 기타 활동',
'Other activities of girls<12yrs': '12세 미만 소녀의 기타 활동',
'Other alternative infant nutrition in use': '사용 중인 기타 대체 유아 영양',
'Other alternative places for study': '공부를 위한 기타 대체 장소',
'Other assistance needed': '기타 지원이 필요',
'Other assistance, Rank': '기타 지원, rank',
'Other current health problems, adults': '기타 현재 건강 문제, 성인',
'Other current health problems, children': '기타 현재 건강 문제, 아동',
'Other events': '기타 이벤트',
'Other factors affecting school attendance': '학교 출석에 영향을 주는 기타 요인',
'Other major expenses': '다른 주요 비용',
'Other non-food items': '기타 식품 항목',
'Other recommendations': '기타 권장사항',
'Other residential': '기타 주거',
'Other school assistance received': '수령한 기타 학교 지원',
'Other school assistance, details': '기타 학교 지원, 세부사항',
'Other school assistance, source': '기타 학교 지원, 출처',
'Other settings can only be set by editing a file on the server': '기타 설정은 서버에 있는 파일을 편집하여 설정할 수 있습니다',
'Other side dishes in stock': '재고 중인 기타 반찬',
'Other types of water storage containers': '기타 유형의 물 저장 용기',
'Other ways to obtain food': '식량을 확보하는 기타 방법',
'Other': '기타',
'Outbound Mail settings are configured in models/000_config.py.': '아웃바운드 메일 설정은 models/000_config.py에서 구성됩니다.',
'Outbox': '보낼 편지함',
'Outgoing SMS Handler': 'sms 전송 핸들러',
'Outgoing SMS handler': 'sms 전송 핸들러',
'Overall Hazards': '전체 위험',
'Overhead falling hazard': '머리 위 낙하물 위험',
'Overland Flow Flood': '지표 유출 홍수',
'Owned Resources': '소유한 자원',
'PIN number': 'PIN 번호',
'PIN': '핀',
'PL Women': 'pl 여성',
'Pack': '팩',
'Packs': '팩',
'Parameters': '매개변수',
'Parent Office': '상위 사무실',
'Parent needs to be of the correct level': '상위는 올바른 레벨이어야 합니다',
'Parent needs to be set for locations of level': '다음 레벨의 위치에는 상위를 설정해야 합니다',
'Parent needs to be set': '상위를 설정해야 합니다',
'Parent': '상위',
'Parents/Caregivers missing children': '자녀를 잃어버린 부모/보호자',
'Partial': '일부',
'Participant': '참가자',
'Pashto': '파슈토어',
'Pass': '패스',
'Passport': '여권',
'Path': '경로',
'Patients': '환자',
'Peer Details': '피어 세부사항',
'Peer Registration Details': '피어 등록 세부사항',
'Peer Registration Request': '피어 등록 요청',
'Peer Registration': '피어 등록',
'Peer Type': '피어 유형',
'Peer UID': '피어 uid',
'Peer added': '피어 추가',
'Peer deleted': '피어 삭제',
'Peer not allowed to push': '피어가 푸시할 수 없습니다',
'Peer registration request added': '피어 등록 요청 추가',
'Peer registration request deleted': '피어 등록 요청 삭제',
'Peer registration request updated': '피어 등록 요청이 갱신될',
'Peer updated': '피어 갱신된',
'Peer': '피어',
'Peers': '피어와',
'Pending Requests': '보류 중인 요청',
'Pending': '보류 중',
'People Needing Food': '식량이 필요한 사람들',
'People Needing Shelter': '대피소가 필요한 사람들',
'People Needing Water': '물이 필요한 사람들',
'People Trapped': '갇힌 사람들',
'People': '사용자',
'Performance Rating': '성능 평가',
'Person 1': '사용자 1',
'Person 1, Person 2 are the potentially duplicate records': '사용자 1, 개인 2 잠재적으로 중복 레코드가 있습니다',
'Person 2': '사용자 2',
'Person De-duplicator': 'de 개인-duplicator',
'Person Details': '개인 세부사항',
'Person Registry': '사용자 레지스트리',
'Person added to Group': '그룹 구성원 추가',
'Person added to Team': '그룹 구성원 추가',
'Person added': '개인 추가됨',
'Person deleted': '개인 삭제',
'Person details updated': '사용자 세부사항 갱신',
'Person interviewed': '인터뷰 대상자',
'Person who has actually seen the person/group.': '해당 개인/그룹을 실제로 본 사람.',
'Person': '사용자',
'Person/Group': '사용자/그룹',
'Personal Data': '개인 데이터',
'Personal Effects Details': '개인 소지품 세부사항',
'Personal Effects': '개인 소지품',
'Personal Map': '개인 맵',
'Personal Profile': '개인 프로파일',
'Personal impact of disaster': '재해의 개인적 영향',
'Persons in institutions': '시설에 있는 사람들',
'Persons with disability (mental)': '장애인 (정신적)',
'Persons with disability (physical)': '장애인 (신체적)',
'Persons': '개인',
'Phone 1': '전화 1',
'Phone 2': '전화 2',
'Phone': '전화',
'Phone/Business': '전화/비즈니스',
'Phone/Emergency': '전화/비상',
'Phone/Exchange (Switchboard)': '전화/exchange (교환원)',
'Photo Details': '사진 세부사항',
'Photo Taken?': '사진을 촬영했습니까?',
'Photo added': '사진 추가',
'Photo deleted': '사진 삭제',
'Photo updated': '사진 갱신',
'Photo': '사진',
'Photograph': '사진',
'Photos': '사진',
'Physical Description': '물리적 설명',
'Physical Safety': '물리적 안전',
'Picture upload and finger print upload facility': '사진 업로드 및 지문 업로드 기능',
'Picture': '그림',
'Place of Recovery': '수습 장소',
'Place': '장소',
'Places for defecation': '배변 장소',
'Places the children have been sent to': '아이들이 보내진 장소',
'Playing': '재생',
'Please correct all errors.': '모든 오류를 정정하십시오.',
'Please enter a first name': '이름을 입력하십시오.',
'Please enter a site OR a location': '사이트 또는 위치를 입력하십시오',
'Please enter the first few letters of the Person/Group for the autocomplete.': '자동 완성을 위해 개인/그룹 이름의 처음 몇 글자를 입력하십시오.',
'Please enter the recipient': '수신인을 입력하십시오',
'Please fill this!': '이 채우십시오!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': '참고하고자 하는 URL 페이지, 발생할 것으로 예측되는 것과 실제로 발생한 것에 대한 설명을 제공해 주시기 바랍니다.',
'Please report here where you are:': '현재 위치를 여기에 알려주세요:',
'Please select another level': '다른 레벨을 선택해 주세요',
'Please select': '선택해주세요',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '문자 메시지를 보낼 수 있도록 휴대폰 번호로 가입해 주십시오. 전체 지역 번호를 포함하십시오.',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '질병의 적절한 처리와 관련된 문제점과 장애 요인을 자세히 (해당되는 경우 숫자로) 기술하십시오. 상황을 개선할 수 있는 제안을 추가할 수도 있습니다.',
'Please use this field to record any additional information, including a history of the record if it is updated.': '이 필드에는 추가 정보를 기록하십시오. 레코드가 갱신되는 경우 갱신 이력을 포함하십시오.',
'Please use this field to record any additional information, including any Special Needs.': '이 필드에는 특수 요구사항을 포함한 추가 정보를 기록하십시오.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': '이 필드에는 Ushahidi 인스턴스 ID와 같은 추가 정보를 기록하십시오. 레코드가 갱신되는 경우 갱신 이력을 포함하십시오.',
'Pledge Support': 'pledge 지원',
'Point': '지점',
'Police': '경찰',
'Pollution and other environmental': '및 기타 환경 오염',
'Polygon reference of the rating unit': '다각형 참조는 등급 장치',
'Polygon': '다각형',
'Poor': '나쁨',
'Population Statistic Details': '인구 통계 세부사항',
'Population Statistic added': '인구 통계 추가',
'Population Statistic deleted': '인구 통계 삭제',
'Population Statistic updated': '인구 통계 갱신',
'Population Statistics': '인구 통계',
'Population and number of households': '인구 및 가구 수',
'Population': '인구',
'Popup Fields': '팝업 필드',
'Popup Label': '팝업 레이블',
'Port Closure': '포트 처리완료',
'Position Catalog': '위치 카탈로그',
'Position Details': '위치 세부사항',
'Position added': '위치 추가',
'Position deleted': '위치 삭제',
'Position updated': '위치 갱신',
'Position': '위치',
'Positions': '위치',
'Postcode': '우편 번호',
'Poultry restocking, Rank': 'restocking 가금류, 랭크',
'Poultry': '가금류',
'Pounds': '파운드',
'Power Failure': '전원 장애',
'Pre-cast connections': '사전 cast 연결',
'Preferred Name': '선호하는 이름',
'Pregnant women': '임산부',
'Preliminary': '예비',
'Presence Condition': '존재 조건',
'Presence Log': '인식 로그',
'Presence': '인식',
'Primary Occupancy': '기본 occupancy',
'Priority from 1 to 9. 1 is most preferred.': '1 에서 9 로 우선순위입니다. 1 가장 좋습니다.',
'Private': '개인용',
'Problem Administration': '문제점 관리',
'Problem Details': '출고 세부사항',
'Problem Group': '문제점 그룹',
'Problem Title': '문제점 제목',
'Problem added': '추가 문제점',
'Problem connecting to twitter.com - please refresh': 'twitter.com 연결 중 문제가 발생했습니다 - 새로 고치십시오',
'Problem deleted': '문제점 삭제됨',
'Problem updated': '문제점 갱신됨',
'Problem': '문제점',
'Problems': '문제',
'Procedure': '절차',
'Process Received Shipment': '수신된 운송물 처리',
'Process Shipment to Send': '발송할 운송물 처리',
'Profile': '프로파일',
'Project Details': '프로젝트 세부사항',
'Project Status': '프로젝트 상태',
'Project added': '프로젝트 추가',
'Project deleted': '프로젝트 삭제',
'Project has no Lat/Lon': '프로젝트에 위도/경도가 없습니다',
'Project updated': '프로젝트 갱신',
'Project': '프로젝트',
'Projection Details': '프로젝션 세부사항',
'Projection added': '프로젝션을 추가됩니다',
'Projection deleted': '프로젝션 삭제',
'Projection updated': '갱신된 투영',
'Projection': '프로젝션',
'Projections': '프로젝션',
'Projects': '프로젝트',
'Property reference in the council system': '등록 정보 참조, council 시스템에서',
'Protected resource': '보호 자원',
'Protection': '보호',
'Provide Metadata for your media files': '매체 파일에 대한 메타데이터 제공',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '전체 건물 또는 손상 지점에 대한 스케치를 선택적으로 제공하십시오. 손상 지점을 표시하십시오.',
'Proxy-server': '프록시 서버',
'Psychiatrics/Adult': '정신과/성인',
'Psychiatrics/Pediatric': '정신과/소아',
'Public Event': '공용 이벤트',
'Public and private transportation': '공용 및 개인용 교통',
'Public assembly': 'public 어셈블리',
'Public': 'public',
'Pull tickets from external feed': '외부 피드에서 티켓 pull',
'Punjabi': '펀잡어',
'Purchase Date': '구매일',
'Push tickets to external system': '티켓을 외부 시스템에 밀어넣습니다',
'Pyroclastic Flow': '화산암 유동',
'Pyroclastic Surge': 'pyroclastic surge',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': '실행 중인 Python에서 Serial 모듈을 사용할 수 없습니다 - 모뎀을 활성화하려면 설치가 필요합니다',
'Python needs the ReportLab module installed for PDF export': 'PDF 내보내기를 위해 Python에 ReportLab 모듈이 설치되어 있어야 합니다',
'Quantity Committed': '커미트된 수량',
'Quantity Fulfilled': '이행되었을 수량',
'Quantity in Transit': '운송 중인 수량',
'Quantity': '수량',
'Quarantine': '검역소로 격리',
'Queries': '쿼리',
'Query': '쿼리',
'Queryable?': '조회?',
'RC frame with masonry infill': 'rc masonry infill 함께 프레임',
'RECORD A': '레코드 A',
'RECORD B': '레코드 B',
'Race': '레이스',
'Radio Callsign': '무선 호출 부호',
'Radiological Hazard': '방사능 위험',
'Railway Accident': '철도 사고',
'Railway Hijacking': '철도 하이잭',
'Rain Fall': '강우',
'Rapid Assessment Details': '긴급 평가 세부사항',
'Rapid Assessment added': '긴급 평가 추가',
'Rapid Assessment deleted': '긴급 평가 삭제',
'Rapid Assessment updated': '긴급 평가 갱신',
'Rapid Assessment': '빠른 평가',
'Rapid Assessments & Flexible Impact Assessments': '긴급 평가 및 유연한 영향 평가',
'Rapid Assessments': '긴급 평가',
'Rapid Close Lead': 'rapid 닫으십시오 lead',
'Rapid Data Entry': '신속한 데이터 항목',
'Raw Database access': '원시 데이터베이스 액세스',
'Receive New Shipment': '수신 새 운송',
'Receive Shipment': '납품 받기',
'Receive this shipment?': '이 운송물을 수령하시겠습니까?',
'Receive': '수령',
'Received By Person': '에서 받은 사람',
'Received By': '입고자',
'Received Item Details': '수신된 항목 세부사항',
'Received Item deleted': '수신된 항목 삭제',
'Received Item updated': '수신된 항목 갱신',
'Received Shipment Details': '수신된 발송물 세부사항',
'Received Shipment canceled and items removed from Inventory': '수신된 운송물이 취소되고 항목이 재고에서 제거되었습니다',
'Received Shipment canceled': '수신된 운송물이 취소되었습니다',
'Received Shipment updated': '수신된 운송물이 갱신되었습니다',
'Received Shipments': '수신된 운송물',
'Received': '받은 날짜',
'Receiving and Sending Items': '수신 및 송신 항목',
'Recipient': '받는 사람',
'Recipients': '수신인',
'Recommendations for Repair and Reconstruction or Demolition': '수리 및 재건 또는 철거에 대한 권장사항',
'Record Details': '레코드 세부사항',
'Record Saved': '레코드가 저장되었습니다',
'Record added': '레코드가 추가됨',
'Record any restriction on use or entry': '사용 또는 출입에 대한 제한 사항을 기록하십시오',
'Record deleted': '레코드 삭제됨',
'Record last updated': '마지막으로 갱신된 레코드',
'Record not found!': '레코드를 찾을 수 없습니다!',
'Record not found': '레코드를 찾지 못함',
'Record updated': '레코드 갱신됨',
'Record': '레코드',
'Recording and Assigning Assets': '자산 기록 및 할당',
'Records': '레코드(기록)',
'Recovery Request added': '복구 요청 추가',
'Recovery Request deleted': '복구 요청 삭제',
'Recovery Request updated': '복구 요청 갱신',
'Recovery Request': '복구 요청',
'Recovery Requests': '복구 요청',
'Recovery': '복구',
'Recurring Cost': '반복 비용',
'Recurring cost': '반복 비용',
'Recurring costs': '반복 비용',
'Recurring': '반복',
'Red Cross / Red Crescent': '적십자/적신월',
'Red': '적색',
'Reference Document': '참조 문서',
'Refresh Rate (seconds)': '새로 고치기 비율(초)',
'Region Location': '지역 위치',
'Regional': '지역',
'Regions': '지역/지구',
'Register Person into this Camp': '이 캠프에 사람 등록',
'Register Person into this Shelter': '이 피난처에 사람 등록',
'Register Person': '사용자 등록',
'Register them as a volunteer': '지원자로 등록',
'Register': '등록',
'Registered People': '등록된 사용자',
'Registered users can': '등록된 사용자가 할 수 있는 작업',
'Registration Details': '등록 세부사항',
'Registration added': '등록 추가',
'Registration entry deleted': '등록 항목 삭제',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '등록이 아직 승인자 (%s) 의 승인 대기 중입니다 - 확인을 받을 때까지 기다려 주십시오.',
'Registration updated': '등록 갱신',
'Registration': '등록',
'Rehabilitation/Long Term Care': 'rehabilitation/장기 지원',
'Rejected': '거부된 날짜',
'Relief Team': '팀 릴리프',
'Relief': '릴리프',
'Religious Leader': '종교 리더',
'Religious': '종교',
'Relocate as instructed in the <instruction>': '<instruction>에 지시된 대로 이동하십시오',
'Remove Asset from this event': '이 이벤트 자산 제거',
'Remove Asset from this scenario': '이 시나리오는 자산 제거',
'Remove Facility from this event': '이 이벤트에서 기능 제거',
'Remove Facility from this scenario': '이 시나리오는 에서 설비 제거',
'Remove Human Resource from this event': '이 이벤트에서 인적 자원 제거',
'Remove Human Resource from this scenario': '이 시나리오에서 인적 자원 제거',
'Remove Item from Inventory': '재고 항목 제거',
'Remove Map Configuration from this event': '이 이벤트는 맵 구성 제거',
'Remove Map Configuration from this scenario': '이 시나리오에서는 맵 구성 제거',
'Remove Person from Group': '멤버쉽 삭제',
'Remove Person from Team': '멤버쉽 삭제',
'Remove this asset from this event': '이 이벤트에서 이 자산 제거',
'Remove this asset from this scenario': '이 시나리오에서 이 자산 제거',
'Remove': '제거',
'Removed from Group': '멤버쉽 삭제',
'Removed from Team': '멤버쉽 삭제',
'Repair': '수리',
'Repaired': '수리',
'Repeat your password': '암호 반복',
'Replace if Master': '대체 마스터 경우',
'Replace if Newer': '최신인 경우 바꾸기',
'Replace': '대체',
'Report Another Assessment...': '보고서는 다른 평가...',
'Report Details': '보고서 세부사항',
'Report Resource': '자원 보고서',
'Report Types Include': '보고서 유형',
'Report added': '보고서 추가',
'Report deleted': '보고서가 삭제됨',
'Report my location': '보고서 내 위치',
'Report the contributing factors for the current EMS status.': '현재 EMS 상태의 기여 요인을 보고하십시오.',
'Report the contributing factors for the current OR status.': '현재 수술실(OR) 상태의 기여 요인을 보고하십시오.',
'Report them as found': '발견된 것으로 보고',
'Report them missing': '실종으로 보고',
'Report updated': '보고서를 갱신했습니다.',
'Report': '보고',
'Reporter Name': '보고자 이름',
'Reporter': '보고자',
'Reporting on the projects in the region': '프로젝트 영역에 대한 보고',
'Reports': '보고서',
'Request Added': '요청 추가',
'Request Canceled': '요청이 취소되었습니다.',
'Request Details': '요청 세부사항',
'Request From': '요청 전송처',
'Request Item Details': '항목 세부사항 요청',
'Request Item added': '요청 항목 추가됨',
'Request Item deleted': '요청 항목 삭제',
'Request Item from Available Inventory': '사용 가능한 재고에서 항목 요청',
'Request Item updated': '요청 항목 갱신',
'Request Item': '요청 항목',
'Request Items': '항목을 요청',
'Request Status': '요청 상태',
'Request Type': '요청 유형',
'Request Updated': '업데이트 항목 요청',
'Request added': '요청 추가',
'Request deleted': '삭제된 요청',
'Request for Role Upgrade': '요청 역할 업그레이드',
'Request updated': '업데이트 항목 요청',
'Request': 'request',
'Request, Response & Session': '요청, 응답 및 세션',
'Requested By Facility': '요청된 설비',
'Requested By': '요청자',
'Requested From': '요청된',
'Requested Items': '요청된 항목',
'Requested by': '요청자',
'Requested on': '요구됩니다.',
'Requested': '요청됨',
'Requester': '요청자',
'Requests Management': '요청 관리',
'Requests': '요청',
'Requires Login!': '로그인이 필요합니다!',
'Reset Password': '비밀번호 재설정',
'Reset': '재설정',
'Resolve Conflict': '충돌 해결',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '해결 링크를 누르면 이러한 중복 레코드를 해결하고 데이터베이스를 갱신하는 데 도움이 되는 새 화면이 열립니다.',
'Resolve': '해결',
'Resource Details': '자원 세부사항',
'Resource added': '자원 추가',
'Resource deleted': '자원 삭제됨',
'Resource updated': '자원이 갱신됨',
'Resource': '자원',
'Resources': '자원',
'Respiratory Infections': '호흡기 infections',
'Response': '응답',
'Restricted Access': '제한된 액세스',
'Restricted Use': '사용 제한',
'Results': '결과',
'Retail Crime': '소매 범죄',
'Retrieve Password': '암호 검색',
'Return to Request': '요청 리턴',
'Return': '수익',
'Returned From': '리턴자',
'Returned': '반품됨',
'Review Incoming Shipment to Receive': '수령할 수신 운송물 검토',
'Rice': '쌀',
'Riot': '폭동',
'River Details': '세부사항 강',
'River added': '추가된 강',
'River deleted': '삭제된 강',
'River updated': '갱신된 강',
'River': '강',
'Rivers': '강',
'Road Accident': '도로 사고',
'Road Closed': '도로 폐쇄',
'Road Conditions': '도로 상태',
'Road Delay': '도로 지연',
'Road Hijacking': '도로 하이잭',
'Road Usage Condition': '도로 사용 조건',
'Role Details': '역할 세부사항',
'Role Required': '필요한 역할',
'Role Updated': '역할 갱신',
'Role added': '역할 추가됨',
'Role deleted': '역할이 삭제됨',
'Role updated': '역할 갱신',
'Role': '역할',
'Role-based': '역할 기반',
'Roles Permitted': '허용되는 역할',
'Roles': '역할',
'Roof tile': '지붕 기와',
'Roofs, floors (vertical load)': '지붕, 바닥 (수직 하중)',
'Room Details': '강의실 세부사항',
'Room added': '미팅룸 추가됩니다',
'Room deleted': '삭제된 강의실',
'Room updated': '미팅룸 갱신된',
'Room': '룸',
'Rooms': '회의실',
'Rows in table': '테이블의 행',
'Rows selected': '선택된 행',
'Run Interval': '실행 간격',
'Running Cost': '운전 비용',
'Safe environment for vulnerable groups': '안전한 환경에서 취약한 그룹',
'Safety Assessment Form': '안전 평가 양식',
'Safety of children and women affected by disaster?': '여성과 어린이들의 안전이 재해로 인해 영향을 받습니까?',
'Sahana Administrator': 'sahana 관리자',
'Sahana Blue': 'sahana 파란색',
'Sahana Community Chat': 'sahana 커뮤니티 대화',
'Sahana Eden <=> Other': 'sahana eden <=> 기타',
'Sahana Eden Humanitarian Management Platform': 'sahana eden humanitarian 관리 플랫폼',
'Sahana Eden Website': 'sahana eden 웹 사이트',
'Sahana Green': 'sahana 초록색',
'Sahana Steel': 'sahana steel',
'Sahana access granted': 'sahana 액세스 권한',
'Salted Fish': 'salted fish',
'Sanitation problems': 'sanitation 문제점',
'Saturday': '토요일',
'Save': '저장',
'Saved.': '저장했습니다.',
'Saving...': '저장 중...',
'Scale of Results': '결과의 스케일',
'Scenario Details': '시나리오 세부사항',
'Scenario added': '시나리오가 추가되었습니다',
'Scenario deleted': '시나리가 삭제되었습니다',
'Scenario updated': '시나리오 갱신된',
'Scenario': '시나리오',
'Scenarios': '시나리오',
'Schedule': '스케줄',
'Schema': '스키마',
'School Closure': '학교 처리완료',
'School Lockdown': '잠금 학교',
'School Teacher': '학교 교사',
'School activities': '학교 활동',
'School assistance': '학교 지원',
'School attendance': '학교 현황',
'School destroyed': '학교 파괴됨',
'School heavily damaged': '학교에 많이 손상됨.',
'School tents received': '학교 텐트 수령',
'School tents, source': '학교 텐트, 출처',
'School used for other purpose': '다른 용도로 사용되는 학교',
'School': '학교',
'School/studying': '학교/연구하여',
'Schools': '학교',
'Search Activities': '활동 검색',
'Search Activity Report': '활동 보고서 검색',
'Search Addresses': '주소 검색',
'Search Alternative Items': '대체 항목 검색',
'Search Assessment Summaries': '평가 요약 검색',
'Search Assessments': '검색 평가',
'Search Asset Log': '자산 로그 검색',
'Search Assets': '자산 검색',
'Search Baseline Type': '검색 기준 유형',
'Search Baselines': '기준선 검색',
'Search Brands': '검색 브랜드',
'Search Budgets': '검색 예산',
'Search Bundles': '번들 검색',
'Search Camp Services': '캠프 서비스 검색',
'Search Camp Types': '캠프 유형 검색',
'Search Camps': '캠프 검색',
'Search Catalog Items': '카탈로그 항목 검색',
'Search Catalogs': '카탈로그 검색',
'Search Certificates': '인증서 검색',
'Search Certifications': '인증 검색',
'Search Checklists': '검색 목록',
'Search Cluster Subsectors': '클러스터 하위 섹터 검색',
'Search Clusters': '클러스터 검색',
'Search Commitment Items': '확약 항목 검색',
'Search Commitments': '확약 검색',
'Search Competencies': '검색 능력',
'Search Competency Ratings': '능력 등급 검색',
'Search Contact Information': '연락처 검색',
'Search Contacts': '연락처 검색',
'Search Course Certificates': '과정 수료증 검색',
'Search Courses': '과정 검색',
'Search Credentials': '신임 검색',
'Search Documents': '문서 검색',
'Search Donors': '기부자 검색',
'Search Entries': '항목 검색',
'Search Events': '이벤트 검색',
'Search Facilities': '검색 기능',
'Search Feature Layers': '검색 기능은 계층',
'Search Flood Reports': '검색 홍수 보고서',
'Search Groups': '그룹 검색',
'Search Human Resources': '인적 자원 검색',
'Search Identity': 'id 검색',
'Search Images': '이미지 검색',
'Search Impact Type': '검색 유형 영향',
'Search Impacts': '영향 검색',
'Search Incident Reports': '검색 인시던트 보고서',
'Search Inventory Items': '재고 항목 검색',
'Search Inventory items': '재고 항목 검색',
'Search Item Categories': '항목 카테고리 검색',
'Search Item Packs': '항목 팩 검색',
'Search Items': '아이템 검색',
'Search Job Roles': '작업 역할 검색',
'Search Keys': '검색 키',
'Search Kits': '상품 검색',
'Search Layers': '계층 검색',
'Search Level 1 Assessments': '레벨 1 평가 검색',
'Search Level 2 Assessments': '레벨 2 평가 검색',
'Search Locations': '검색 위치',
'Search Log Entry': '로그 항목 검색',
'Search Map Configurations': '맵에서 구성 검색',
'Search Markers': '검색 마커',
'Search Members': '구성원 검색',
'Search Membership': '구성원 검색',
'Search Memberships': '구성원 검색',
'Search Missions': '검색 임무',
'Search Need Type': '필요한 검색 유형',
'Search Needs': '필요한 검색',
'Search Offices': '사무실 검색',
'Search Organizations': '조직 검색',
'Search Peer': '피어 검색',
'Search Personal Effects': '개인 검색 효과',
'Search Persons': '개인 검색',
'Search Photos': '사진 검색',
'Search Population Statistics': '인구 통계 검색',
'Search Positions': '검색 위치',
'Search Problems': '검색 문제점',
'Search Projections': '검색 투영',
'Search Projects': '검색 프로젝트',
'Search Rapid Assessments': '긴급 평가 검색',
'Search Received Items': '수신된 항목 검색',
'Search Received Shipments': '수신된 운송물을 검색',
'Search Records': '검색 레코드',
'Search Registations': 'registations 검색',
'Search Registration Request': '등록 요청 검색',
'Search Report': '보고서 검색',
'Search Request Items': '요청 항목 검색',
'Search Request': '검색 요청',
'Search Requested Items': '요청된 항목 검색',
'Search Requests': '요청 검색',
'Search Resources': '자원 검색',
'Search Rivers': '검색 강',
'Search Roles': '역할 검색',
'Search Rooms': '회의실 검색',
'Search Scenarios': '검색 시나리오',
'Search Sections': '검색 섹션',
'Search Sectors': '섹터를 검색',
'Search Sent Items': '전송된 항목 검색',
'Search Sent Shipments': '송신된 운송물을 검색',
'Search Service Profiles': '검색 서비스 프로파일',
'Search Settings': '검색 설정',
'Search Shelter Services': '피난처 서비스 검색',
'Search Shelter Types': '피난처 유형 검색',
'Search Shelters': '피난처 검색',
'Search Skill Equivalences': 'equivalences 스킬 검색',
'Search Skill Provisions': '검색 기술 조항',
'Search Skill Types': '기술 유형 검색',
'Search Skills': '기술 검색',
'Search Solutions': '해결방안 검색',
'Search Staff Types': '직원 유형 검색',
'Search Staff or Volunteer': '스태프 또는 지원자 검색',
'Search Status': '검색 상태',
'Search Subscriptions': '자동 통지 등록 검색',
'Search Subsectors': '하위 섹터 검색',
'Search Support Requests': '지원 요청 검색',
'Search Tasks': '작업 검색',
'Search Teams': '팀 검색',
'Search Themes': '주제 검색',
'Search Tickets': '티켓 검색',
'Search Tracks': '트랙 검색',
'Search Trainings': '교육 검색',
'Search Twitter Tags': '트위터 태그 검색',
'Search Units': '장치 검색',
'Search Users': '사용자 검색',
'Search Volunteer Availability': '지원자 가용성 검색',
'Search Volunteers': '지원자 검색',
'Search Warehouses': '웨어하우스 검색',
'Search and Edit Group': '검색 및 그룹 편집',
'Search and Edit Individual': '검색 및 개별 편집',
'Search for Staff or Volunteers': '검색 직원 또는 volunteers 대한',
'Search for a Location by name, including local names.': '위치 이름, 로컬 이름 검색.',
'Search for a Person': '사용자 검색',
'Search for a Project': '프로젝트 검색',
'Search for a shipment by looking for text in any field.': '모든 필드에 텍스트를 찾아 운송 검색하십시오.',
'Search for a shipment received between these dates': '이 날짜 사이에 수신된 발송물에 대한 검색',
'Search for an Organization by name or acronym': '이름 또는 약어 로 조직에 대한 검색',
'Search for an Organization by name or acronym.': '이름 또는 acronym 의해 조직을 검색하려면.',
'Search for an asset by text.': '텍스트 자산을 검색하십시오.',
'Search for an item by category.': '카테고리 항목을 검색하십시오.',
'Search for an item by text.': '텍스트 항목을 검색하십시오.',
'Search for asset by country.': '국가 의해 자산을 검색하십시오.',
'Search for office by country.': '국가에 따라 사무실 검색하십시오.',
'Search for office by organization.': '조직에서 사무실 검색하십시오.',
'Search for office by text.': '텍스트 검색 사무실.',
'Search for warehouse by country.': '국가 웨어하우스 검색하십시오.',
'Search for warehouse by organization.': '조직 웨어하우스 검색하십시오.',
'Search for warehouse by text.': '텍스트 웨어하우스 검색하십시오.',
'Search here for a person record in order to:': '다음을 위해 여기에서 개인 레코드를 검색하십시오:',
'Search messages': '메시지 검색',
'Search': '검색',
'Searching for different groups and individuals': '다른 그룹 및 개인에 대한 검색',
'Secondary Server (Optional)': '보조 서버 (선택적)',
'Seconds must be a number between 0 and 60': '초는 0과 60 사이의 숫자이어야 합니다.',
'Section Details': '섹션 세부사항',
'Section deleted': '섹션 삭제',
'Section updated': '갱신된 절',
'Sections': '섹션',
'Sector Details': '섹터 세부사항',
'Sector added': '추가된 섹터',
'Sector deleted': '삭제된 섹터',
'Sector updated': '갱신된 부문',
'Sector': '섹터',
'Sector(s)': '섹터 (s)',
'Sectors': '섹터',
'Security Status': '보안 상태',
'Security problems': '보안 문제점',
'See All Entries': '모든 항목을 참조하십시오',
'See all': '모두 보기',
'See unassigned recovery requests': '지정되지 않은 복구 요청을 참조하십시오.',
'Select Items from the Request': '요청 항목을 선택하십시오.',
'Select Items from this Inventory': '이 재고 품목 선택',
'Select a location': '위치 선택',
'Select a question from the list': '이 목록에서 질문을 선택하십시오',
'Select a range for the number of total beds': '의료용 총 수에 대한 범위를 선택하십시오.',
'Select all that apply': '적용되는 모든 항목 선택',
'Select an Organization to see a list of offices': '사무실 목록을 보려면 조직을 선택하십시오',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': '격차를 식별하려면 각 요구와 관련된 평가 및 활동의 오버레이를 선택하십시오.',
'Select the person assigned to this role for this project.': '이 프로젝트에 대해 이 역할에 지정된 사용자를 선택하십시오.',
'Select to show this configuration in the Regions menu.': '지역 메뉴에 이 구성을 표시하려면 선택하십시오.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'SMS 발신에 모뎀, Tropo 또는 기타 게이트웨이 중 무엇을 사용할지 선택합니다',
'Send Alerts using Email &/or SMS': '전자 우편 및/또는 SMS를 사용하여 경보 송신',
'Send Commitment as Shipment': '확약을 운송물로 송신',
'Send New Shipment': '새 운송물 송신',
'Send Notification': '통지 전송',
'Send Shipment': '운송물 송신',
'Send a message to this person': '이 사용자에게 메시지 보내기',
'Send a message to this team': '이 팀에 보내기',
'Send from %s': '%s에서 보내기',
'Send message': '메시지 보내기',
'Send new message': '새 메시지 송신',
'Send': '보내기',
'Sends & Receives Alerts via Email & SMS': '전자 우편 및 SMS를 통해 경보를 송수신합니다',
'Senior (50+)': 'senior (50+)',
'Sent By Person': '에서 보낸 사람',
'Sent By': '보낸 사람',
'Sent Item Details': '보낸 항목 세부사항',
'Sent Item deleted': '보낸 항목 삭제',
'Sent Item updated': '보낸 항목 갱신',
'Sent Shipment Details': '송신된 발송물 세부사항',
'Sent Shipment canceled and items returned to Inventory': '송신된 운송물이 취소되고 항목이 재고로 반환되었습니다',
'Sent Shipment canceled': '송신된 운송물이 취소되었습니다',
'Sent Shipment updated': '송신된 운송물이 갱신되었습니다',
'Sent Shipments': '송신된 운송물',
'Sent': '보낸 문서',
'Separated children, caregiving arrangements': '분리된 아동, 돌봄 방안',
'Serial Number': '일련 번호',
'Series': '시리즈',
'Server': '서버',
'Service Catalog': '서비스 카탈로그',
'Service or Facility': '서비스 또는 기능',
'Service profile added': '서비스 프로파일 추가',
'Service profile deleted': '서비스 프로파일 삭제',
'Service profile updated': '서비스 프로파일 갱신',
'Service': '서비스',
'Services Available': '서비스 사용 가능',
'Services': '서비스',
'Set Base Site': '기본 사이트 설정',
'Set By': '다음 기준으로 설정',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'MapAdmin이 아닌 사용자가 위치 계층 구조의 이 레벨을 편집할 수 있도록 하려면 True로 설정하십시오.',
'Setting Details': '세부사항 설정',
'Setting added': '설정 추가',
'Setting deleted': '삭제된 설정',
'Setting updated': '갱신 설정',
'Settings updated': '설정 갱신',
'Settings were reset because authenticating with Twitter failed': 'Twitter 인증에 실패하여 설정이 재설정되었습니다',
'Settings which can be configured through the web interface are available here.': '웹 인터페이스를 통해 구성할 수 있는 설정은 다음과 같습니다.',
'Settings': '설정',
'Severe': '심각',
'Severity': '심각도',
'Share a common Marker (unless over-ridden at the Feature level)': '공통 마커를 공유합니다 (기능 레벨에서 재정의하지 않는 한)',
'Shelter & Essential NFIs': '피난처 & 기본적인 NFI들',
'Shelter Details': '피난처 세부사항',
'Shelter Name': '피난처이름',
'Shelter Registry': '피난처 레지스트리',
'Shelter Service Details': '피난처 서비스 세부사항',
'Shelter Service added': '피난처 서비스 추가',
'Shelter Service deleted': '피난처 서비스 삭제',
'Shelter Service updated': '피난처 서비스 갱신',
'Shelter Service': '피난처 서비스',
'Shelter Services': '피난처 서비스',
'Shelter Type Details': '피난처 유형 세부사항',
'Shelter Type added': '피난처 유형 추가',
'Shelter Type deleted': '피난처 유형 삭제',
'Shelter Type updated': '피난처 유형 갱신',
'Shelter Type': '피난처 유형',
'Shelter Types and Services': '피난처 유형 및 서비스',
'Shelter Types': '피난처 유형',
'Shelter added': '피난처 추가됨',
'Shelter deleted': '피난처 삭제됨',
'Shelter updated': '피난처 갱신됨',
'Shelter': '피난처',
'Shelter/NFI Assistance': '피난처/NFI 지원',
'Shipment Created': '작성된 운송물',
'Shipment Items received by Inventory': '재고로 수령된 운송 항목',
'Shipment Items sent from Inventory': '재고에서 송신된 운송 항목',
'Shipment Items': '선적 항목',
'Shipment to Send': '발송할 운송물',
'Shipments To': '운송물 수신처',
'Shipments': '납품',
'Shooting': '총격',
'Short Assessment': '짧은 평가',
'Short Description': '간단한 설명',
'Show Checklist': '체크 표시',
'Show Details': '세부사항 표시',
'Show Map': '맵 표시',
'Show Region in Menu?': '메뉴에 지역을 표시하시겠습니까?',
'Show on Map': '맵에 표시',
'Show on map': '맵에 표시',
'Sign-up as a volunteer': '지원자로 가입',
'Sign-up for Account': '계정 가입',
'Sign-up succesful - you should hear from us soon!': '가입 성공 - 곧 연락드리겠습니다!',
'Sindhi': '신디어',
'Site Administration': '사이트 관리',
'Site': '사이트',
'Situation Awareness & Geospatial Analysis': '상황 인식 및 지리공간 분석',
'Situation': '상황',
'Sketch': '스케치',
'Skill Catalog': '기술 카탈로그',
'Skill Details': '기술 항목 정보',
'Skill Equivalence Details': '기술 동급에 대한 세부사항',
'Skill Equivalence added': '기술 동급 내용이 추가되었습니다.',
'Skill Equivalence deleted': '기술 동급 내용이 삭제되었습니다',
'Skill Equivalence updated': '기술 동급내용이 업데이트되었습니다',
'Skill Equivalence': '기술 equivalence',
'Skill Equivalences': '기술 동급들',
'Skill Provision Catalog': '기술 조항 카탈로그',
'Skill Provision Details': '기술 조항 세부사항',
'Skill Provision added': '기술 제공이 추가되었습니다.',
'Skill Provision deleted': '기술 조항이 삭제되었습니다',
'Skill Provision updated': '기술 조항이 업데이트되었습니다',
'Skill Provision': '기술 제공',
'Skill Provisions': '기술 조항들',
'Skill Status': '기술 상태',
'Skill TYpe': '기술 유형',
'Skill Type Catalog': '기술 유형 카탈로그',
'Skill Type Details': '기술 유형 세부사항',
'Skill Type added': '기술 유형 추가',
'Skill Type deleted': '기술 유형 삭제',
'Skill Type updated': '기술 유형 갱신',
'Skill Types': '스킬 유형',
'Skill added': '기술 추가',
'Skill deleted': '스킬 삭제',
'Skill updated': '기술 갱신',
'Skill': '기술',
'Skills Catalog': '기술 카탈로그',
'Skills Management': '기술 항목 관리',
'Skills': '기술',
'Skype ID': 'skype id',
'Slope failure, debris': '기울기가 실패, 이물질을',
'Small Trade': '소규모 거래',
'Smoke': '연기',
'Snapshot Report': '스냅샷 보고서',
'Snapshot': '스냅샷',
'Snow Fall': '강설',
'Snow Squall': '눈 스콜',
'Soil bulging, liquefaction': '토양 융기, 액상화',
'Solid waste': '고형 폐기물',
'Solution Details': '솔루션 세부사항',
'Solution Item': '솔루션 항목',
'Solution added': '솔루션 추가',
'Solution deleted': '솔루션 삭제',
'Solution updated': '솔루션 갱신',
'Solution': 'SOLUTION',
'Solutions': '솔루션',
'Some': 'SOME',
'Sorry that location appears to be outside the area of the Parent.': '죄송합니다. 해당 위치에 상위 영역 외부에 나타납니다.',
'Sorry that location appears to be outside the area supported by this deployment.': '죄송합니다. 해당 위치에 이 deployment 의해 지원되는 영역 외부에 나타납니다.',
'Sorry, I could not understand your request': '죄송합니다, 지금 사용자의 요청을 이해할 수 없습니다.',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '죄송합니다, mapadmin 역할을 가진 사용자만 위치 그룹을 작성할 수 있습니다.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '죄송합니다, mapadmin 역할을 가진 사용자만 이 위치를 편집할 수 있습니다',
'Sorry, something went wrong.': '죄송합니다.',
'Sorry, that page is forbidden for some reason.': '죄송합니다, 페이지 어떤 이유로 금지됩니다.',
'Sorry, that service is temporary unavailable.': '죄송합니다, 서비스 임시 사용 불가능합니다.',
'Sorry, there are no addresses to display': '죄송합니다, 표시할 주소가 없습니다',
'Source ID': '소스 ID',
'Source Time': '소스 시간',
'Source': '소스',
'Sources of income': '수입원',
'Space Debris': '우주 잔해',
'Spanish': '스페인어',
'Special Ice': '특수 얼음',
'Special Marine': '특수 marine',
'Specialized Hospital': '특수화된 병원',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': '이 개인/그룹이 목격된 위치 내의 특정 영역 (예: 건물/방).',
'Specific locations need to have a parent of level': '특정 위치에는 다음 레벨의 상위 항목이 있어야 합니다',
'Specify a descriptive title for the image.': '이미지에 대한 설명적 제목을 지정하십시오.',
'Specify the bed type of this unit.': '이 장치의 bed 유형을 지정하십시오.',
'Specify the number of available sets': '사용 가능한 세트 수를 지정하십시오.',
'Specify the number of available units (adult doses)': '사용 가능한 단위 수 (성인 투여량) 를 지정하십시오',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '링거 락테이트 또는 동등한 수액의 사용 가능한 단위 수 (리터) 를 지정하십시오',
'Specify the number of sets needed per 24h': '24h 당 필요한 세트 수를 지정하십시오.',
'Specify the number of units (adult doses) needed per 24h': '24시간당 필요한 단위 수 (성인 투여량) 를 지정하십시오',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '24시간당 필요한 링거 락테이트 또는 동등한 수액의 단위 수 (리터) 를 지정하십시오',
'Spherical Mercator?': '구면 mercator?',
'Spreadsheet Importer': '스프레드시트 임포터',
'Spreadsheet uploaded': '스프레드시트 업로드',
'Spring': '봄',
'Staff & Volunteers': '직원 및 volunteers',
'Staff ID': '직원 id',
'Staff Member Details': '스태프 구성원 세부사항',
'Staff Members': '스태프 구성원',
'Staff Record': '직원 레코드',
'Staff Type Details': '직원 유형 세부사항',
'Staff Type added': '직원 유형 추가',
'Staff Type deleted': '직원 유형 삭제',
'Staff Type updated': '직원 유형 갱신',
'Staff Types': '직원 유형',
'Staff and Volunteers': '직원 및 volunteers',
'Staff member added': '스태프 구성원 추가',
'Staff present and caring for residents': '스태프 및 해결방법 거주자에게 대한',
'Staff': '스태프',
'Staffing': '스탭핑',
'Stairs': '계단',
'Start Date': '시작 날짜',
'Start date': '시작 날짜',
'Start of Period': '기간 시작',
'State': '주(US 전용)',
'Stationery': '개인양식',
'Status Report': '상태 보고서',
'Status Updated': '상태 갱신',
'Status added': '상태 추가',
'Status deleted': '상태 삭제',
'Status of clinical operation of the facility.': '기능의 임상 조작 상태.',
'Status of general operation of the facility.': 'facility 일반 조작 상태.',
'Status of morgue capacity.': 'morgue 용량 상태.',
'Status of operations of the emergency department of this hospital.': '이 병원 의 긴급 부서의 조작 상태.',
'Status of security procedures/access restrictions in the hospital.': '병원 보안 절차를/액세스 제한 상태.',
'Status of the operating rooms of this hospital.': '이 병원의 수술실 상태.',
'Status updated': '상태 갱신',
'Status': 'STATUS',
'Steel frame': '강철 프레임',
'Stolen': '분실',
'Store spreadsheets in the Eden database': '스프레드시트를 Eden 데이터베이스에 저장',
'Storeys at and above ground level': '지상층 및 그 이상의 층수',
'Storm Force Wind': '폭풍급 강풍',
'Storm Surge': '폭풍 해일',
'Street Address': '주소',
'Strong Wind': '강한 바람',
'Structural Hazards': '구조적 위험',
'Structural': '구조적',
'Style Field': '양식 필드',
'Style Values': '스타일 값',
'Sub-type': '하위 유형',
'Submission successful - please wait': '제출 성공-기다리십시오',
'Submission successful - please wait...': '제출 성공-기다리십시오...',
'Submit New (full form)': '새 제출 (전체 양식)',
'Submit New (triage)': '새 제출 (triage)',
'Submit New': '새로 제출',
'Submit a request for recovery': '복구 요청 제출',
'Submit new Level 1 assessment (full form)': '제출 새 레벨 1 평가 (전체 양식)',
'Submit new Level 1 assessment (triage)': '제출 새 레벨 1 평가 (triage)',
'Submit new Level 2 assessment': '제출 새 레벨 2 평가',
'Subscription Details': '등록 세부사항',
'Subscription added': '등록이 추가됨',
'Subscription deleted': 'subscription 삭제',
'Subscription updated': '서브스크립션 갱신',
'Subscriptions': '등록',
'Subsector Details': '하위 섹터 세부사항',
'Subsector added': '하위 섹터 추가',
'Subsector deleted': '하위 섹터 삭제',
'Subsector updated': '하위 섹터 갱신',
'Subsistence Cost': '생계비',
'Suggest not changing this field unless you know what you are doing.': '무엇을 하는지 잘 알지 못한다면 이 필드를 변경하지 않는 것이 좋습니다.',
'Summary by Administration Level': '관리 레벨별 요약',
'Summary': '요약',
'Sunday': '일요일',
'Supply Chain Management': '공급망 관리(SCM)',
'Support Request': '지원요청',
'Support Requests': '지원요청들',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '지위계급의 목록작성을 도와줌으로서 위기 관리자들 그룹의 의사결정을 지원한다',
'Surgery': '수술',
'Survey Answer Details': '서베이 응답 세부사항',
'Survey Answer added': '서베이 응답 추가',
'Survey Answer deleted': '서베이 응답 삭제',
'Survey Answer updated': '서베이 응답 갱신',
'Survey Answer': '조사 응답',
'Survey Module': '모듈 조사',
'Survey Name': '설문 조사 이름',
'Survey Question Details': '서베이 질문 세부사항',
'Survey Question Display Name': '서베이 질문 표시 이름',
'Survey Question added': '서베이 질문 추가',
'Survey Question deleted': '서베이 질문 삭제',
'Survey Question updated': '서베이 질문 갱신된',
'Survey Question': '서베이 질문',
'Survey Series Details': '서베이 시리즈 세부사항',
'Survey Series Name': '서베이 시리즈 이름',
'Survey Series added': '서베이 시리즈 추가',
'Survey Series deleted': '서베이 시리즈 삭제',
'Survey Series updated': '서베이 일련의 갱신',
'Survey Series': '서베이 시리즈',
'Survey Template Details': '서베이 템플리트 세부사항',
'Survey Template added': '서베이 템플리트 추가',
'Survey Template deleted': '서베이 템플리트 삭제',
'Survey Template updated': '서베이 템플리트 갱신',
'Survey Template': '설문 조사 템플리트',
'Survey Templates': '서베이 템플리트',
'Symbology': '심볼로지',
'Sync Conflicts': '동기화 충돌',
'Sync History': '동기화 히스토리',
'Sync Now': '지금 동기화',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': '동기화 파트너는 정보를 동기화하려는 인스턴스 또는 피어 (SahanaEden, SahanaAgasti, Ushahidi 등) 입니다. 오른쪽 링크를 누르면 동기화 파트너를 추가하고 검색하고 수정할 수 있는 페이지로 이동합니다.',
'Sync Partners': '동기화 파트너',
'Sync Pools': '풀 동기',
'Sync Schedule': '동기화 스케줄',
'Sync Settings': '동기화 설정',
'Sync process already started on': '동기화 프로세스가 이미 시작된 에서',
'Synchronisation': '동기화',
'Synchronization Conflicts': '동기화 충돌',
'Synchronization Details': '동기화 세부사항',
'Synchronization History': '동기화 히스토리',
'Synchronization Peers': '동기화 피어와',
'Synchronization Settings': '동기화 설정',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': '동기화를 통해 보유한 데이터를 다른 사용자와 공유하고 다른 피어의 최신 데이터로 자신의 데이터베이스를 갱신할 수 있습니다. 이 페이지는 Sahana Eden의 동기화 기능 사용 방법에 대한 정보를 제공합니다.',
'Synchronization not configured.': '동기화가 구성되지 않았습니다.',
'Synchronization settings updated': '동기화 설정 갱신',
'Synchronization': '동기화',
'Syncronisation History': '동기화 히스토리',
'Tags': '태그',
'Take shelter in place or per <instruction>': '현 위치에서 대피하거나 <instruction>에 따르십시오',
'Task Details': '태스크 세부사항',
'Task List': '태스크 목록',
'Task Status': '태스크 상태',
'Task added': '태스크 추가',
'Task deleted': '태스크가 삭제됨',
'Task updated': '태스크 갱신됨',
'Tasks': '태스크',
'Team Description': '팀 설명',
'Team Details': '팀 세부사항',
'Team ID': '팀 ID',
'Team Id': '팀 ID',
'Team Leader': '팀 리더',
'Team Member added': '팀 구성원 추가',
'Team Members': '팀 구성원',
'Team Name': '팀 이름',
'Team Type': '팀 유형',
'Team added': '팀 추가',
'Team deleted': '팀 삭제',
'Team updated': '팀 갱신된',
'Team': '팀',
'Teams': '팀',
'Technical testing only, all recipients disregard': '기술 테스트, 모든 사람 무시하십시오',
'Telecommunications': '통신',
'Telephone': '전화번호',
'Telephony': '전화 통신',
'Temp folder %s not writable - unable to apply theme!': '임시 폴더 %s에 쓸 수 없습니다 - 테마를 적용할 수 없습니다!',
'Template file %s not readable - unable to apply theme!': '템플리트 파일 %s을(를) 읽을 수 없습니다 - 테마를 적용할 수 없습니다!',
'Templates': '템플리트',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '국가 내 5차 행정 구역 (예: 투표구 또는 우편번호 구역) 을 가리키는 용어. 이 레벨은 자주 사용되지 않습니다.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '국가 내 4차 행정 구역 (예: 마을, 인근 지역 또는 관할 구역) 을 가리키는 용어.',
'Term for the primary within-country administrative division (e.g. State or Province).': '국가 내 1차 행정 구역 (예: 주 또는 도) 을 가리키는 용어.',
'Term for the secondary within-country administrative division (e.g. District or County).': '국가 내 2차 행정 구역 (예: 군 또는 구) 을 가리키는 용어.',
'Term for the third-level within-country administrative division (e.g. City or Town).': '국가 내 3차 행정 구역 (예: 시 또는 읍) 을 가리키는 용어.',
'Term for the top-level administrative division (i.e. Country).': '최상위 행정 구역 (즉, 국가) 을 가리키는 용어.',
'Territorial Authority': '지방 자치 단체',
'Tertiary Server (Optional)': '3차 서버 (선택적)',
'Text Color for Text blocks': '텍스트 블록의 텍스트 색상',
'Text': '텍스트',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': '이메일을 확인해 주셔서 감사합니다. 사용자 계정은 아직 시스템 관리자 (%s) 의 승인 대기 중입니다. 계정이 활성화되면 전자 우편으로 알림을 받게 됩니다.',
'Thanks for your assistance': '도움에 감사드립니다',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '\\ " query\\ " 조건 \\ " db.table1.field1==\'value\'\\ " 입니다. \\ 같이 " db.table1.field1 == db.table2.field2\\ " sql 조인의 결과.',
'The Area which this Site is located within.': '이 사이트 영역 내에 위치합니다.',
'The Assessments module allows field workers to send in assessments.': '평가 모듈을 통해 현장 작업자가 평가를 제출할 수 있습니다.',
'The Author of this Document (optional)': '작성자가 문서 (선택적)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': '건물 평가 모듈을 통해 지진 후 등 건물의 안전을 평가할 수 있습니다.',
'The Camp this Request is from': '이 요청을 보낸 캠프',
'The Camp this person is checking into.': '이 사람이 입소하는 캠프입니다.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '개인/그룹의 현재 위치로, 일반적 위치 (보고용) 또는 정확한 위치 (맵 표시용) 일 수 있습니다. 사용 가능한 위치에서 검색하려면 몇 글자를 입력하십시오.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '승인 요청이 전송되는 전자 우편 주소입니다 (일반적으로 개인보다는 그룹 메일이어야 합니다). 이 필드가 비어 있으면 도메인이 일치하는 경우 요청이 자동으로 승인됩니다.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '인시던트 보고 시스템을 통해 일반 대중이 인시던트를 보고하고 이를 추적할 수 있습니다.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '이 사람이 출발한 위치로, 일반적 위치 (보고용) 또는 정확한 위치 (맵 표시용) 일 수 있습니다. 사용 가능한 위치에서 검색하려면 몇 글자를 입력하십시오.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '이 사람이 향하는 위치로, 일반적 위치 (보고용) 또는 정확한 위치 (맵 표시용) 일 수 있습니다. 사용 가능한 위치에서 검색하려면 몇 글자를 입력하십시오.',
'The Media Library provides a catalog of digital media.': '매체 라이브러리 디지털 매체 카탈로그를 제공합니다.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': '메시징 모듈은 Sahana 시스템의 기본 통신 허브입니다. 재해 전후와 재해 중에 SMS 및 전자 우편을 사용하여 다양한 그룹과 개인에게 경보 및/또는 메시지를 보내는 데 사용됩니다.',
'The Organization Registry keeps track of all the relief organizations working in the area.': '조직 레지스트리에서 영역에 대한 모든 릴리프 조직을 추적합니다.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': '프로젝트 추적 모듈을 통해 요구 평가의 격차를 메우기 위한 활동을 작성할 수 있습니다.',
'The Role this person plays within this hospital.': '이 사람이 이 병원 내에서 수행하는 역할입니다.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '피난처 레지스트리는 모든 피난처를 추적하고 관련 기본 정보를 저장합니다. 다른 모듈과 연동하여 피난처와 연관된 사람, 사용 가능한 서비스 등을 추적합니다.',
'The Shelter this Request is from': '이 요청을 보낸 피난처',
'The Shelter this person is checking into.': '이 사람이 입소하는 피난처입니다.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': '맵의 브라우저 패널을 통해 계층을 사용할 수 있게 하려는 웹 맵 서비스(WMS)의 GetCapabilities 페이지 URL입니다.',
'The URL of your web gateway without the post parameters': 'POST 매개변수를 제외한 웹 게이트웨이의 URL',
'The URL to access the service.': '서비스에 액세스하기 위한 URL입니다.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '정부가 이 시설에 지정한 고유 식별자(UUID)입니다.',
'The asset must be assigned to a site OR location.': '사이트 자산 또는 위치를 지정해야 합니다.',
'The attribute which is used for the title of popups.': '팝업 제목에 사용되는 속성입니다.',
'The attribute within the KML which is used for the title of popups.': '이 속성은 kml 내에서 팝업을 제목으로 사용됩니다.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': '속성 (s) kml 내에 팝업을 본문에 사용됩니다. (속성 사이에 공백을 사용하십시오.)',
'The body height (crown to heel) in cm.': '신장 (정수리에서 발뒤꿈치까지), cm 단위.',
'The country the person usually lives in.': '이 사람이 평소 거주하는 국가입니다.',
'The default Organization for whom this person is acting.': '이 사용자의 기본 역할을 조직.',
'The default Organization for whom you are acting.': '에 대한 구입처에 역할을 하는 기본 조직.',
'The duplicate record will be deleted': '중복 레코드는 삭제됩니다',
'The first or only name of the person (mandatory).': '대상자의 이름 (필수)',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'URL 형식은 http://your/web/map/service?service=WMS&request=GetCapabilities 이며, 여기서 your/web/map/service 는 WMS의 URL 경로입니다.',
'The language you wish the site to be displayed in.': '에 표시되는 사이트의 언어입니다.',
'The list of Brands are maintained by the Administrators.': '브랜드 목록에서 administrators 의해 유지보수됩니다.',
'The list of Catalogs are maintained by the Administrators.': '카탈로그 목록에서, 관리자에 의해 유지보수됩니다.',
'The map will be displayed initially with this latitude at the center.': '맵이 초기에 center 에서 이 latitude 함께 표시됩니다.',
'The map will be displayed initially with this longitude at the center.': '맵이 초기에 center 에서 이 경도를 함께 표시됩니다.',
'The minimum number of features to form a cluster.': '클러스터를 형성하는 데 필요한 최소 기능 수입니다.',
'The name to be used when calling for or directly addressing the person (optional).': '이 사람을 부르거나 직접 지칭할 때 사용하는 이름입니다 (선택적).',
'The next screen will allow you to detail the number of people here & their needs.': '다음 화면에서 이곳의 인원 수와 그들의 요구사항을 자세히 입력할 수 있습니다.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '항목의 측정 단위 1에 해당하는 대체 항목의 측정 단위 수',
'The number of pixels apart that features need to be before they are clustered.': '기능이 클러스터되기 전에 떨어져 있어야 하는 픽셀 수입니다.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '표시 맵 주위로 다운로드할 수 바둑판식. 영, 1 페이지를 빠르게 로드하고, 숫자가 커질수록 후속 초점이동을 빠릅니다.',
'The person at the location who is reporting this incident (optional)': '개인이 위치에 있는 이 사건 (선택사항) 보고',
'The post variable containing the phone number': '전화번호를 포함하는 POST 변수',
'The post variable on the URL used for sending messages': '메시지 전송에 사용되는 URL의 POST 변수',
'The post variables other than the ones containing the message and the phone number': '메시지와 전화번호를 포함하는 변수 이외의 POST 변수',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': '모뎀이 연결된 직렬 포트 - Linux에서는 /dev/ttyUSB0 등, Windows에서는 com1, com2 등',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '서버가 브라우저에서 요청을 채우기 위해 액세스하는 다른 서버로부터 시기적절한 응답을 수신하지 못했습니다.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '서버가 브라우저에서 요청을 채우기 위해 액세스하는 다른 서버에서 올바르지 않은 응답을 수신했습니다.',
'The site where this position is based.': '이 사이트 위치를 기반으로 합니다.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': '시설 담당 직원은 지원 요청을 할 수 있습니다. 이러한 요청에 대해 확약할 수 있지만, 요청자가 요청 완료를 확인할 때까지 요청은 열린 상태로 유지됩니다.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '해당 이벤트는 더 이상 위협이나 우려 사항이 아니며, 후속 조치는 <instruction>에 설명되어 있습니다',
'The time at which the Event started.': '이벤트가 시작된 시간입니다.',
'The token associated with this application on': '토큰 이 연관된 응용프로그램',
'The unique identifier which identifies this instance to other instances.': '다른 인스턴스에 대해 이 인스턴스를 식별하는 고유 식별자입니다.',
'The way in which an item is normally distributed': '항목이 정상적으로 분산되는 방법을',
'The weight in kg.': 'kg 의 가중치입니다.',
'Theme Details': '테마 세부사항',
'Theme added': '테마 추가',
'Theme deleted': '테마 삭제',
'Theme updated': '갱신된 테마',
'Theme': '테마',
'Themes': '주제',
'There are errors': '오류가 있는',
'There are insufficient items in the Inventory to send this shipment': '이 운송물을 보내기에는 재고 항목이 부족합니다',
'There is no address for this person yet. Add new address.': '이 사용자에 대한 주소가 아직 없음. 새 주소를 추가하십시오.',
'These are settings for Inbound Mail.': '이들 인바운드 메일에 대한 설정이 있습니다.',
'These are the Incident Categories visible to normal End-Users': '일반 최종 사용자에게 표시되는 인시던트 카테고리입니다',
'These need to be added in Decimal Degrees.': '이 값은 십진수 도 (Decimal Degrees) 단위로 입력해야 합니다.',
'They': '다른 사용자',
'This Group has no Members yet': '이 그룹에는 아직 구성원이 없습니다',
'This Team has no Members yet': '이 팀에는 아직 구성원이 없습니다',
'This appears to be a duplicate of': '이 중복으로 나타납니다',
'This file already exists on the server as': '이 파일은 이미 존재합니다.',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': '이 레벨 생성 경우 이 적합합니다. 로 인한 수정 후 이 레벨을 완료되지 않도록 이 false 로 설정할 수 있습니다.',
'This is the way to transfer data between machines as it maintains referential integrity.': '이는 참조 무결성을 유지보수하는 시스템 간에 데이터를 전송할 수 있는 방법입니다.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '이는 참조 무결성을 유지보수하는 시스템 간에 데이터를 전송할 수 있는 방법입니다. .. 중복 데이터 1 수동으로 제거해야 합니다!',
'This level is not open for editing.': '이 레벨은 열고 편집할 수 없습니다.',
'This might be due to a temporary overloading or maintenance of the server.': '이 때문에 임시 과부하 또는 유지보수로 될 수 있습니다.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': '이 모듈을 통해 시설 재고 간에 재고 항목을 요청하고 운송할 수 있습니다.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '이 모듈을 통해 연습과 이벤트 모두에 대한 시나리오를 계획할 수 있습니다. 적절한 자원 (인력, 자산, 시설) 을 할당하여 쉽게 동원할 수 있습니다.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '이 페이지에는 지난 동기화 로그가 표시됩니다. 이 페이지로 이동하려면 아래 링크를 누르십시오.',
'This screen allows you to upload a collection of photos to the server.': '이 화면에서는 사진 콜렉션을 서버로 업로드할 수 있습니다.',
'This setting can only be controlled by the Administrator.': '이 설정은 관리자가 제어할 수 있습니다.',
'This shipment has already been received.': '이 운송물은 이미 수령되었습니다.',
'This shipment has already been sent.': '이 운송물은 이미 송신되었습니다.',
'This shipment has not been received - it has NOT been canceled because can still be edited.': '이 운송물은 수령되지 않았습니다 - 아직 편집할 수 있으므로 취소되지 않았습니다.',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': '이 운송물은 송신되지 않았습니다 - 아직 편집할 수 있으므로 취소되지 않았습니다.',
'This shipment will be confirmed as received.': '이 운송물은 수령된 것으로 확인됩니다.',
'Thursday': '목요일',
'Ticket Details': '티켓 세부사항',
'Ticket ID': '티켓 ID',
'Ticket added': '티켓 추가',
'Ticket deleted': '티켓 삭제됨',
'Ticket updated': '티켓 갱신',
'Ticket': '티켓',
'Ticketing Module': 'ticketing 모듈',
'Tickets': '티켓',
'Tilt-up concrete': '틸트업 콘크리트',
'Timber frame': '목조 골조',
'Timeline Report': '타임라인 보고서',
'Timeline': '타임라인',
'Title to show for the Web Map Service panel in the Tools panel.': '도구 패널의 웹 맵 서비스 패널에 표시할 제목입니다.',
'To Location': '대상 위치',
'To Person': '개인 수신',
'To begin the sync process, click the button on the right =>': '대한 sync 프로세스를 시작하려면, 마우스 => 단추를 누르십시오',
'To begin the sync process, click this button =>': '에 대한 동기화 프로세스를 시작하려면 이 단추를 누르십시오 =>',
'To create a personal map configuration, click': '개인용 맵 구성을 작성하려면 클릭하십시오',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'OpenStreetMap을 편집하려면 models/000_config.py의 OpenStreetMap 설정을 편집해야 합니다',
'To search by job title, enter any portion of the title. You may use % as wildcard.': '직책으로 검색하려면 직책의 일부를 입력하십시오. %를 와일드카드로 사용할 수 있습니다.',
'To variable': '변수',
'To': '종료',
'Tools': '공구',
'Total # of Target Beneficiaries': '총 대상 수혜자 수',
'Total # of households of site visited': '방문한 사이트의 총 가구 수',
'Total Beds': '총 병상 수',
'Total Beneficiaries': '총 수혜자',
'Total Cost per Megabyte': '메가바이트당 총 비용',
'Total Cost per Minute': '분당 총 비용',
'Total Monthly Cost': '월별 총 비용',
'Total Monthly Cost:': '월별 총 비용:',
'Total Monthly': '월별 총계',
'Total One-time Costs': '총 일회 비용',
'Total Persons': '총 사용자',
'Total Recurring Costs': '반복 총 비용',
'Total Unit Cost': '총 비용',
'Total Unit Cost:': '총 비용:',
'Total Units': '총 장치',
'Total gross floor area (square meters)': '총 연면적 (제곱미터)',
'Total number of beds in this hospital. Automatically updated from daily reports.': '이 병원의 총 병상 수입니다. 일일 보고서에서 자동으로 갱신됩니다.',
'Total number of houses in the area': '해당 지역의 총 주택 수',
'Total number of schools in affected area': '피해 지역의 총 학교 수',
'Total population of site visited': '방문한 사이트의 총 인구',
'Total': '전체',
'Totals for Budget:': '총계를 예산:',
'Totals for Bundle:': '번들에 대한 총계:',
'Totals for Kit:': '상품 총계:',
'Tourist Group': '여행자 그룹',
'Town': '읍',
'Traces internally displaced people (IDPs) and their needs': '국내 실향민(IDP)과 그들의 요구사항을 추적합니다',
'Tracing': '추적',
'Track Details': '세부사항 추적',
'Track deleted': '추적 삭제',
'Track updated': '갱신된 트랙',
'Track uploaded': '추적합니다 업로드된',
'Track with this Person?': '이 개인과 추적합니다?',
'Track': '트랙',
'Tracking of Projects, Activities and Tasks': '프로젝트 추적, 활동 및 태스크',
'Tracking of basic information on the location, facilities and size of the Shelters': '피난처의 위치, 시설 및 규모에 대한 기본 정보 추적',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '피난처 내 이재민의 위치, 분포, 수용 인원 및 구성을 추적합니다',
'Tracks': '트랙',
'Traffic Report': '트래픽 보고서',
'Training Course Catalog': '교육 과정 카탈로그',
'Training Details': '교육 세부사항',
'Training added': '교육 추가',
'Training deleted': '교육 삭제',
'Training updated': '갱신된 교육',
'Training': '교육',
'Transit Status': '전송 상태',
'Transit': '운송',
'Transition Effect': '전환 효과',
'Transparent?': '투명합니다?',
'Transportation assistance, Rank': '교통, 랭크',
'Trauma Center': 'trauma center',
'Travel Cost': '여행 비용',
'Tropical Storm': '열대 폭풍',
'Tropo Messaging Token': 'Tropo 메시징 토큰',
'Tropo Settings': 'Tropo 설정',
'Tropo Voice Token': 'Tropo 음성 토큰',
'Tropo settings updated': 'Tropo 설정 갱신',
'Truck': '트럭',
'Try checking the URL for errors, maybe it was mistyped.': 'URL에 오류가 있는지 확인하십시오. 잘못 입력되었을 수 있습니다.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': '화면 갱신/다시 로드 단추를 누르면 주소 표시줄에서 url 을 다시 시도하십시오.',
'Try refreshing the page or hitting the back button on your browser.': '페이지 새로 고침 또는 브라우저의 뒤로 단추를 누르면 시도하십시오.',
'Tuesday': '화요일',
'Twitter ID or #hashtag': 'twitter id 또는 #hashtag',
'Twitter Settings': 'twitter 설정',
'Twitter': '트위터',
'Type of Construction': '건축 유형',
'Type of water source before the disaster': '재해 이전의 수원 유형',
'UN': '유엔',
'Un-Repairable': '복구 불가한',
'Unable to parse CSV file!': 'CSV 파일을 구문 분석할 수 없습니다!',
'Unidentified': '미확인',
'Unit Cost': '단가',
'Unit added': '장치 추가',
'Unit deleted': '장치 삭제',
'Unit of Measure': '측정 단위',
'Unit updated': '갱신된 장치',
'Units': '단위',
'Unknown Peer': '알 수 없는 피어',
'Unknown type of facility': '알 수 없는 유형 기능',
'Unknown': '알 수 없음',
'Unresolved Conflicts': '분석되지 않은 충돌',
'Unsafe': '안전하지 않음',
'Unselect to disable the modem': '모뎀을 사용하지 않으려면 선택을 해제하십시오',
'Unsent': '보내지 않음',
'Unsupported data format!': '지원되지 않는 데이터 형식!',
'Unsupported method!': '지원되지 않는 메소드!',
'Update Activity Report': '활동 보고서 갱신',
'Update Cholera Treatment Capability Information': '콜레라 치료 능력 정보 갱신',
'Update Request': '업데이트 요청',
'Update Service Profile': '서비스 프로파일 갱신',
'Update Status': '업데이트 상태',
'Update Task Status': '태스크 상태 갱신',
'Update Unit': '장치 갱신',
'Update if Master': '마스터 경우 갱신',
'Update if Newer': '최신이면 갱신',
'Update your current ordered list': '현재 정렬된 목록 갱신',
'Update': '업데이트',
'Updated By': '에 의해 업데이트 됨',
'Upload Photos': '사진 업로드',
'Upload Spreadsheet': '스프레드시트 업로드',
'Upload Track': '업로드 트랙',
'Upload a Spreadsheet': '스프레드시트 업로드',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '이미지 파일 (bmp, gif, jpeg 또는 png), max. 업로드 300x300 픽셀!',
'Upload an image file here.': '이미지 파일을 업로드하십시오.',
'Upload an image, such as a photo': '이미지 (예: 사진 업로드',
'Urban Fire': '도시 화재',
'Urban area': '도시 지역',
'Urdu': '우르두어',
'Urgent': '긴급',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '및, 또는 (...)|(...), 및 의 더 복잡한 조회를 빌드할 수 ~(...) 대한 (...)&(...).',
'Use Geocoder for address lookups?': '주소 찾아보기에 지오코더?',
'Use default': '기본값 사용',
'Use these links to download data that is currently in the database.': '이 링크는 현재 데이터베이스에 있는 데이터를 다운로드할 수 있습니다.',
'Used by IRS & Assess': 'IRS & Assess가 사용하는',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': '마우스 오버 툴팁과 클러스터 팝업에서 유형을 구분하는 데 사용됩니다.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': '마우스 오버 툴팁을 구성하는 데 사용되며, 첫 번째 필드는 클러스터 팝업에서 레코드를 구분하는 데에도 사용됩니다.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '입력한 해당 위치의 위도 확인하는 데 사용됩니다. 해당 위치에 있는 자원 필터 목록 데 사용될 수 있습니다.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '입력한 해당 위치의 경도 확인하는 데 사용됩니다. 해당 위치에 있는 자원 필터 목록 데 사용될 수 있습니다.',
'Used to import data from spreadsheets into the database': '스프레드시트의 데이터를 데이터베이스로 가져오는 데 사용됩니다',
'Used within Inventory Management, Request Management and Asset Management': '재고 관리, 관리 및 자산 관리 내에서 사용되는',
'User Account has been Disabled': '사용자 계정을 사용할 수 없습니다',
'User Details': '사용자 세부사항',
'User Management': '사용자 관리',
'User Profile': '사용자 프로파일',
'User Requests': '사용자 요청',
'User Updated': '사용자가 갱신되었습니다',
'User added': '사용자 추가됨',
'User already has this role': '이 역할은 사용자가 이미 있습니다',
'User deleted': '사용자 삭제됨',
'User updated': '사용자가 갱신되었습니다',
'Users removed': '사용자가 제거되었습니다.',
'Users': '사용자',
'Uses the REST Query Format defined in': '다음에 정의된 REST 조회 형식을 사용합니다',
'Utilities': '유틸리티',
'Utility, telecommunication, other non-transport infrastructure': '유틸리티, 통신, 기타 전송 인프라',
'Various Reporting functionalities': '다양한 보고 기능',
'Vehicle Crime': '차량 범죄',
'Vehicle Types': '자동차 유형',
'Vehicle': '차량',
'Verification Status': '검증 상태',
'Verified?': '확인되었습니다?',
'Verify password': '비밀번호 검증',
'Very Good': '매우 양호',
'Very High': '매우 높음',
'View Alerts received using either Email or SMS': '전자 우편 또는 SMS로 수신된 경보 보기',
'View All': '모두 보기',
'View Error Tickets': '오류 티켓 보기',
'View Fullscreen Map': '전체 화면 맵 보기',
'View Image': '이미지 보기',
'View Items': '항목 보기',
'View On Map': '맵 보기',
'View Outbox': '편지함 보기',
'View Picture': '그림 열람',
'View Settings': '보기 설정',
'View Tickets': '티켓 보기',
'View and/or update their details': '보기 및/또는 해당 세부사항 갱신',
'View or update the status of a hospital.': '보기 또는 갱신 병원 의 상태.',
'View pending requests and pledge support.': '보류 중인 요청 및 pledge 지원 보기.',
'View the hospitals on a map.': '맵에서 이 병원 보십시오.',
'View/Edit the Database directly': '보기/데이터베이스 직접 편집',
'Village Leader': '마을 리더',
'Visible?': '표시하시겠습니까?',
'Visual Recognition': 'visual 인식',
'Volcanic Ash Cloud': '화산재 구름',
'Volcanic Event': '화산 이벤트',
'Volume (m3)': '볼륨 (m3)',
'Volunteer Availability': '가용성 지원자',
'Volunteer Details': '지원자 세부사항',
'Volunteer Information': '지원자 정보',
'Volunteer Management': '지원자 관리',
'Volunteer Project': '지원자 프로젝트',
'Volunteer Record': '지원자 레코드',
'Volunteer Request': '지원자 요청',
'Volunteer added': '지원자 추가됩니다',
'Volunteer availability added': '지원자 가용성 추가',
'Volunteer availability deleted': '지원자 가용성 삭제',
'Volunteer availability updated': '지원자 가용성 갱신',
'Volunteer deleted': '지원자 삭제',
'Volunteer details updated': '지원자 세부사항 갱신',
'Volunteers were notified!': '지원자에게 알림이 전송되었습니다!',
'Volunteers': '지원자',
'Vote': '투표',
'Votes': '투표 수',
'WASH': '식수 위생 (WASH)',
'Walking Only': '도보만 가능',
'Wall or other structural damage': '벽 또는 다른 구조적 손상',
'Warehouse Details': '웨어하우스 세부사항',
'Warehouse added': '웨어하우스 추가됩니다',
'Warehouse deleted': '웨어하우스 삭제됨',
'Warehouse updated': '웨어하우스 갱신된',
'Warehouses': '웨어하우스',
'Water Sanitation Hygiene': '물, 위생, 보건',
'Water collection': '물 긷기',
'Water gallon': '물 갤런',
'Water storage containers in households': '가정 내 물 저장 용기',
'Water supply': '물 공급',
'Web Map Service Browser Name': '맵 서비스에서 웹 브라우저 이름',
'Web Map Service Browser URL': '맵 서비스에서 웹 브라우저 url',
'Website': '웹 사이트',
'Wednesday': '수요일',
'Weight (kg)': '무게 (kg)',
'Weight': '무게',
'Welcome to the Sahana Portal at': '이 sahana portal 에 오신 것을 환영합니다',
'Wheat': '밀',
'When reports were entered': '보고서가 입력된 시점',
'Who is doing what and where': '누가 어디서 무엇을 하고 있는지',
'Who usually collects water for the family?': '일반적으로 누가 가족을 위해 물을 길어옵니까?',
'Width (m)': '너비 (m)',
'Wild Fire': '산불',
'Wind Chill': '체감 온도',
'Window frame': '창틀',
'Winter Storm': '겨울 폭풍',
'Women of Child Bearing Age': '출산 가능한 시기의 여성들',
'Women participating in coping activities': '대처 활동에 참가하는 여성들',
'Women who are Pregnant or in Labour': '임신 중이거나 일을 하는 여성들',
'Womens Focus Groups': '여성 포커스 그룹',
'Wooden plank': '나무 plank',
'Wooden poles': '나무 폴',
'Working hours end': '작업 시간 종료',
'Working hours start': '작업 시간 시작',
'Working or other to provide money/food': '작업 또는 다른 돈을/식품 제공하기 위해',
'YES': '예',
'Year built': '건축 연도',
'Year of Manufacture': '제조 연도',
'Yellow': '노란색',
'Yes': '예',
'You are a recovery team?': '복구 팀?',
'You are attempting to delete your own account - are you sure you want to proceed?': '자신의 계정-삭제하려고 시도합니다 확실합니까 계속하시겠습니까?',
'You are currently reported missing!': '현재 보고됩니다 누락되었습니다!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '설정값 섹션에서 동기화 모듈의 구성을 변경할 수 있습니다. 이 구성, uuid (unique identification number), 동기화 스케줄, beacon 서비스 등이 포함됩니다. 다음 링크를 눌러 동기화 설정 페이지로 이동하십시오.',
'You can click on the map below to select the Lat/Lon fields': '맵핑 아래 lat/₩ 필드를 선택할 수 있습니다',
'You can select the Draw tool': '사용자가 그리기 도구를 선택할 수 있습니다',
'You can set the modem settings for SMS here.': '모뎀 설정을 sms 여기에 설정할 수 있습니다.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '변환 도구를 gps 변환하기 위해 사용할 수 있는 좌표 또는 도/분/초.',
'You do not have permission for any facility to make a commitment.': '모든 기능에대한 약속을 결정할 권한이 없습니다',
'You do not have permission for any facility to make a request.': '모든 기능에 대한 요청을 할 권한이 없습니다',
'You do not have permission for any site to add an inventory item.': '모든 사이트에 대해 자원 명세 항목을 추가할 권한이 없습니다.',
'You do not have permission for any site to receive a shipment.': '권한이 있는 모든 사이트의 납품 받을 필요가 없습니다.',
'You do not have permission for any site to send a shipment.': '권한이 있는 모든 사이트의 납품 보낼 필요가 없습니다.',
'You do not have permission to cancel this received shipment.': '이 받은 shipment 취소할 수 없습니다.',
'You do not have permission to cancel this sent shipment.': '이 shipment 보낸 취소할 수 없습니다.',
'You do not have permission to make this commitment.': '이 확약 변경할 필요가 없습니다.',
'You do not have permission to receive this shipment.': '이 shipment 수신할 수 없습니다.',
'You do not have permission to send a shipment from this site.': '이 사이트에서 shipment 보낼 필요가 없습니다.',
'You do not have permission to send this shipment.': '이 shipment 전송할 수 있는 권한이 없습니다.',
'You have a personal map configuration. To change your personal configuration, click': '사용자가 개인용 맵 구성. 개인용 구성 변경, 누르십시오',
'You have found a dead body?': '사용자는 데드 본문을 찾을 수 있습니까?',
'You must be logged in to register volunteers.': 'volunteers 를 등록할 수 로그인해야 합니다.',
'You must be logged in to report persons missing or found.': '누락된 또는 찾을 사람이 보고서 에 로그인해야 합니다.',
'You must provide a series id to proceed.': '일련의 id 진행하려면 제공해야 합니다.',
'You should edit Twitter settings in models/000_config.py': '모델/000_config. py 에서 twitter 설정을 편집해야 합니다',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '현재 솔루션 항목의 순서화된 목록 아래에 표시됩니다. 다시 voting 이를 변경할 수 있습니다.',
'Your post was added successfully.': 'post 가 성공적으로 추가되었습니다.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': '시스템 고유 id (uuid), 다른 컴퓨터를 중심으로 사용자가 식별할 수 있습니다. 사용자의 uuid 보기 동기화 -> sync 설정으로 이동하십시오. 또한 페이지의 다른 설정을 볼 수 있습니다.',
'Zero Hour': '이 시간',
'Zinc roof': 'roof 아연',
'Zoom Levels': '확대/축소 레벨',
'Zoom': '확대/축소',
'active': '활성화',
'added': '추가됨',
'all records': '모든 레코드',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': '예산 기반으로 인력 및 장비 비용, 모든 관리 오버헤드를 포함하여 개발할 수 있습니다.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '조사 자연 재해 다음 손상을 평가하고 작성 및 관리할 수 있습니다.',
'an individual/team to do in 1-2 days': '개인/팀은 1-2 일 수',
'assigned': '지정됨',
'average': '평균',
'black': '검은색',
'blue': '파란색',
'brown': '갈색',
'by': '게시자',
'c/o Name': 'c/o 이름',
'can be used to extract data from spreadsheets and put them into database tables.': '스프레드시트 데이터를 추출하는 데 사용할 수 있고 데이터베이스 테이블로 이를 넣으십시오.',
'cancelled': '취소',
'check all': '모두 선택',
'click for more details': '자세한 내용은 누르십시오',
'completed': '완료됨',
'consider': '고려',
'curly': '중괄호',
'currently registered': '현재 등록된',
'dark': '어둡게',
'data uploaded': '데이터 업로드',
'database %s select': '데이터베이스% 를 선택하십시오.',
'database': '데이터베이스',
'deceased': '사망함',
'delete all checked': '모두 삭제 checked',
'deleted': '삭제',
'design': '설계',
'displaced': '프로덕트를',
'divorced': '이혼',
'done!': '완료',
'duplicate': '중복',
'edit': '편집',
'eg. gas, electricity, water': '예: 가스, 전기, 물',
'enclosed area': '영역 안에',
'export as csv file': 'csv 파일로 내보내기',
'feedback': '피드백',
'female': 'Female',
'flush latrine with septic tank': 'flush latrine septic 와 탱크',
'found': '발견됨',
'from Twitter': 'twitter 에서',
'green': '초록색',
'grey': '회색',
'here': '다음은',
'high': '높음',
'hourly': '시간별',
'households': '가구수',
'identified': '식별됨',
'ignore': '무시',
'in Deg Min Sec format': '정도 분 초 형식으로',
'in GPS format': 'gps 형식으로',
'inactive': '비활성화',
'insert new %s': '새로운 %s를(을) 삽입합니다',
'insert new': '새로 삽입',
'invalid request': '올바르지 않은 요청',
'invalid': '올바르지 않음',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': '모든 피해 희생 및 제품군에서 특히 정보를 casualties, evacuees 및 프로덕트를 사용자 식별된 저장할 수 있는 중앙 온라인 저장소입니다. 정보: 이름, 나이, 연락처, 등록정보창, 해당 위치 및 기타 세부사항을 캡처합니다. 사람 그림 및 finger 세부사항 인쇄 시스템 업로드할 수 있습니다. 또한 사용자 편의를 위해 효율성 및 group by 캡처될 수 있습니다.',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': '함께 제공하는 조직에서 완화 및 프로젝트의 항목 관리에 대한 복잡한 기능을 작업을 여러 하위 모듈로 구성됩니다 위해 계획된 것입니다. 이 흡입구 시스템, 웨어하우스 관리 시스템, 상품 추적, 공급망 관리, 차량 관리, 조달, 재무 추적 및 기타 자산 및 자원 관리 기능이 있습니다',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '이를 categorised 및 actioning 대한 적절한 라우트할 수 있도록 모든 수신 티켓을 추적합니다.',
'leave empty to detach account': '비어 있는 상태로 분리하려면 계정',
'legend URL': 'url 범례',
'light': '얇게',
'login': '로그인',
'low': '낮음',
'manual': '수동',
'medium': '중간',
'meters': '미터',
'missing': '누락',
'module allows the site administrator to configure various options.': '모듈 사이트 운영자 다양한 옵션을 구성할 수 있습니다.',
'module helps monitoring the status of hospitals.': '모듈 병원 의 상태 모니터링 도움이 됩니다.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': '모듈 메커니즘입니다 공동으로 개발 피해 개요, 온라인 맵핑 (gis) 를 제공합니다.',
'more': '자세히',
'new record inserted': '새 레코드 삽입',
'new': '신규',
'next 100 rows': '다음 100 행',
'none': '없음',
'normal': '정상',
'not accessible - no cached version available!': '액세스할 수 없음-사용할 수 있는 캐시된 버전!',
'not accessible - using cached version from': '액세스할-캐시된 버전을 사용하여',
'not specified': '지정되지 않음',
'obsolete': '사용되지 않음',
'open defecation': '열기 defecation',
'optional': '선택적',
'or import from csv file': 'csv 파일에서 가져오기',
'other': '기타',
'over one hour': '한 시간 동안',
'people': '사용자',
'piece': '조각',
'postponed': '연기',
'preliminary template or draft, not actionable in its current form': '예비 템플리트 또는 드래프트, 실천 현재 양식으로',
'previous 100 rows': '이전 100 행',
'record does not exist': '레코드가 없음',
'record id': '레코드 ID',
'red': '적색',
'reports successfully imported.': '보고서 임포트했습니다.',
'representation of the Polygon/Line.': '다각형/행 표시.',
'retired': '퇴직함',
'river': '강',
'see comment': '주석 참조',
'selected': '선택',
'separated from family': '제품군에서 구분됩니다',
'separated': '분리',
'shaved': '면도 된',
'sides': '측면',
'sign-up now': '이제 사인업',
'specify': '지정',
'staff members': '스태프 구성원',
'staff': '스태프',
'state location': '위치 상태',
'state': '상태(State)',
'straight': '직선',
'suffered financial losses': '재정 손실을 발생함',
'table': '테이블',
'this': '그러면',
'to access the system': '시스템 액세스',
'total': '전체',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy 모듈 사용 중인 python-이 내에서 비 tropo twitter 지원 설치 합니다!',
'unable to parse csv file': 'unable csv 파일을 구문 분석할 수 없습니다.',
'uncheck all': '모두 선택 취소',
'unidentified': '미확인',
'unknown': '알 수 없음',
'unspecified': '지정되지 않음',
'unverified': '확인되지 않음',
'updated': '업데이트 날짜',
'updates only': '갱신사항만',
'verified': '확인',
'volunteer': '지원자',
'wavy': '물결선',
'weekly': '주별',
'white': '흰색',
'wider area, longer term, usually contain multiple Activities': '넓은 영역, 장기, 보통 여러 활동이 포함된',
'widowed': '사별',
'within human habitat': 'habitat 내의 사용자',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt 모듈 사용 중인 python-이 내에서 xls 출력 installing 합니다!',
'yes': '예',
}
|
code-for-india/sahana_shelter_worldbank
|
languages/ko.py
|
Python
|
mit
| 244,176
|
[
"VisIt"
] |
36c424a7a9b94423adc4b2e42e41ca71cce10f4e10396c8274b9ee7442ed534c
|
import os
from exceptions import TableException
import atpy
from decorators import auto_download_to_file, auto_decompress_to_fileobj, auto_fileobj_to_file
try:
import h5py
h5py_installed = True
except ImportError:
h5py_installed = False
def _check_h5py_installed():
if not h5py_installed:
raise Exception("Cannot read/write HDF5 files - h5py required")
def _get_group(filename, group="", append=False):
if append:
f = h5py.File(filename, 'a')
else:
f = h5py.File(filename, 'w')
if group:
if append:
if group in f.keys():
g = f[group]
else:
g = f.create_group(group)
else:
g = f.create_group(group)
else:
g = f
return f, g
def _create_required_groups(g, path):
'''
Given a file or group handle, and a path, make sure that the specified
path exists and create if necessary.
'''
for dirname in path.split('/'):
if not dirname in g:
g = g.create_group(dirname)
else:
g = g[dirname]
def _list_tables(file_handle):
list_of_names = []
file_handle.visit(list_of_names.append)
tables = {}
for item in list_of_names:
if isinstance(file_handle[item], h5py.highlevel.Dataset):
if file_handle[item].dtype.names:
tables[item] = item
return tables
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, table=None, verbose=True):
'''
Read a table from an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to read the table from
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to read the table from
Optional Keyword Arguments:
*table*: [ string ]
The name of the table to read from the HDF5 file (this is only
required if there are more than one table in the file)
'''
_check_h5py_installed()
self.reset()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
else:
if not os.path.exists(filename):
raise Exception("File not found: %s" % filename)
f = h5py.File(filename, 'r')
g = f['/']
# If no table is requested, check that there is only one table
if table is None:
tables = _list_tables(g)
if len(tables) == 1:
table = tables.keys()[0]
else:
raise TableException(tables, 'table')
# Set the table name
self.table_name = str(table)
self._setup_table(len(g[table]), g[table].dtype)
# Add columns to table
for name in g[table].dtype.names:
self.data[name][:] = g[table][name][:]
for attribute in g[table].attrs:
self.add_keyword(attribute, g[table].attrs[attribute])
if f is not None:
f.close()
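# A minimal usage sketch (not part of the original module): the file name and
# table name below are hypothetical, and atpy.Table is assumed to dispatch
# '.h5' files to the reader above.
#
#     import atpy
#     t = atpy.Table('data.h5', table='catalog')
#     # t.data now holds the structured array filled in by read()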
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read_set(self, filename, pedantic=False, verbose=True):
'''
Read all tables from an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to read the tables from
'''
_check_h5py_installed()
self.reset()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
else:
if not os.path.exists(filename):
raise Exception("File not found: %s" % filename)
f = h5py.File(filename, 'r')
g = f['/']
for keyword in g.attrs:
self.keywords[keyword] = g.attrs[keyword]
for table in _list_tables(g):
t = atpy.Table()
read(t, filename, table=table, verbose=verbose)
self.append(t)
if f is not None:
f.close()
def write(self, filename, compression=False, group="", append=False,
overwrite=False, ignore_groups=False):
'''
Write the table to an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to write the table to
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to write the table to
Optional Keyword Arguments:
*compression*: [ True | False ]
Whether to compress the table inside the HDF5 file
*group*: [ string ]
The group to write the table to inside the HDF5 file
*append*: [ True | False ]
Whether to append the table to an existing HDF5 file
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
*ignore_groups*: [ True | False ]
With this option set to True, groups are removed from table names.
With this option set to False, tables are placed in groups that
are present in the table name, and the groups are created if
necessary.
'''
_check_h5py_installed()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
if group:
if group in g:
g = g[group]
else:
g = g.create_group(group)
else:
if os.path.exists(filename) and not append:
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
f, g = _get_group(filename, group=group, append=append)
if self.table_name:
name = self.table_name
else:
name = "Table"
if ignore_groups:
name = os.path.basename(name)
else:
path = os.path.dirname(name)
if path:
_create_required_groups(g, path)
if name in g.keys():
raise Exception("Table %s/%s already exists" % (group, name))
dset = g.create_dataset(name, data=self.data, compression=compression)
for keyword in self.keywords:
dset.attrs[keyword] = self.keywords[keyword]
if f is not None:
f.close()
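# A minimal usage sketch for the writer above (hypothetical names; the keyword
# arguments mirror the ones documented in the docstring):
#
#     t.write('results.h5', group='reduced', compression=True, overwrite=True)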
def write_set(self, filename, compression=False, group="", append=False,
overwrite=False, ignore_groups=False, **kwargs):
'''
Write the tables to an HDF5 file
Required Arguments:
*filename*: [ string ]
The HDF5 file to write the tables to
OR
*file or group handle*: [ h5py.highlevel.File | h5py.highlevel.Group ]
The HDF5 file handle or group handle to write the tables to
Optional Keyword Arguments:
*compression*: [ True | False ]
Whether to compress the tables inside the HDF5 file
*group*: [ string ]
The group to write the table to inside the HDF5 file
*append*: [ True | False ]
Whether to append the tables to an existing HDF5 file
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
*ignore_groups*: [ True | False ]
With this option set to True, groups are removed from table names.
With this option set to False, tables are placed in groups that
are present in the table name, and the groups are created if
necessary.
'''
_check_h5py_installed()
if isinstance(filename, h5py.highlevel.File) or isinstance(filename, h5py.highlevel.Group):
f, g = None, filename
if group:
if group in g:
g = g[group]
else:
g = g.create_group(group)
else:
if os.path.exists(filename) and not append:
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
f, g = _get_group(filename, group=group, append=append)
for keyword in self.keywords:
g.attrs[keyword] = self.keywords[keyword]
for i, table_key in enumerate(self.tables):
if self.tables[table_key].table_name:
name = self.tables[table_key].table_name
else:
name = "Table_%02i" % i
if ignore_groups:
name = os.path.basename(name)
else:
path = os.path.dirname(name)
if path:
_create_required_groups(g, path)
if name in g.keys():
raise Exception("Table %s/%s already exists" % (group, name))
dset = g.create_dataset(name, data=self.tables[table_key].data, compression=compression)
for keyword in self.tables[table_key].keywords:
dset.attrs[keyword] = self.tables[table_key].keywords[keyword]
if f is not None:
f.close()
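# A minimal sketch for writing a whole table set (assumption: atpy.TableSet
# routes HDF5 output through write_set above; names are hypothetical):
#
#     ts = atpy.TableSet()
#     ts.append(t1)
#     ts.append(t2)
#     ts.write('all_tables.h5', overwrite=True)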
|
stvoutsin/pyrothorn
|
pyrothorn/pyroquery/atpy/hdf5table.py
|
Python
|
gpl-3.0
| 8,820
|
[
"VisIt"
] |
7bb5e07627560065d30cca1110c46472064cc169b33a844d253277086778528b
|
import truetypetracer as ttt
import openvoronoi as ovd
import ovdvtk
import time
import vtk
def translate(segs, x, y):
out = []
for seg in segs:
seg2 = []
for p in seg:
p2 = []
p2.append(p[0] + x)
p2.append(p[1] + y)
seg2.append(p2)
# seg2.append(seg[3] + y)
out.append(seg2)
return out
def insert_polygon_points(vd, polygon):
pts = []
for p in polygon:
pts.append(ovd.Point(p[0], p[1]))
id_list = []
# print "inserting ",len(pts)," point-sites:"
m = 0
for p in pts:
id_list.append(vd.addVertexSite(p))
# print " ",m," added vertex ", id_list[ len(id_list) -1 ]
m = m + 1
print m, " point-sites inserted." # inserting ",len(pts)," point-sites:"
return id_list
def insert_polygon_segments(vd, id_list):
j = 0
print "inserting ", len(id_list), " line-segments:"
for n in range(len(id_list)):
n_nxt = n + 1
if n == (len(id_list) - 1):
n_nxt = 0
print " ", j, "inserting segment ", id_list[n], " - ", id_list[n_nxt]
if 0: # id_list[n] == 31921: #78238: # 47013:
vd.debug_on()
vd.addLineSite(id_list[n], id_list[n_nxt], 2) # fails: now 78238/13
vod.setVDText2([1, 1])
vod.setAll()
# verts=[id_list[n], id_list[n_nxt], 117443,117445,117460,117454]
# for v in verts:
# print "drawing ",v
# print vod
# print dir(vod)
# vod.drawVertexIdx(v)
vod.drawIncidentVertexIds()
# f4792 f4795
for v in vd.getFaceVertices(18924):
vod.drawVertexIdx(v)
print "PYTHON All DONE."
# f = ovd.Point(0.055,-0.2437)
# myscreen.camera.SetPosition(f.x, f.y-float(1)/float(1000), 0.3)
# myscreen.camera.SetClippingRange(-(zmult+1)*camPos,(zmult+1)*camPos)
# myscreen.camera.SetFocalPoint( f.x, f.y, 0)
myscreen.render()
myscreen.iren.Start()
elif 0: # id_list[n] in [ 78206, 78241, 78225]:
vd.addLineSite(id_list[n], id_list[n_nxt])
else:
# pass
vd.addLineSite(id_list[n], id_list[n_nxt])
j = j + 1
def modify_segments(segs):
segs_mod = []
for seg in segs:
first = seg[0]
last = seg[len(seg) - 1]
assert (first[0] == last[0] and first[1] == last[1])
seg.pop()
seg.reverse()
segs_mod.append(seg)
# drawSegment(myscreen, seg)
return segs_mod
def insert_many_polygons(vd, segs):
polygon_ids = []
t_before = time.time()
for poly in segs:
poly_id = insert_polygon_points(vd, poly)
polygon_ids.append(poly_id)
t_after = time.time()
pt_time = t_after - t_before
t_before = time.time()
for ids in polygon_ids:
insert_polygon_segments(vd, ids)
t_after = time.time()
seg_time = t_after - t_before
return [pt_time, seg_time]
def ttt_segments(text, scale):
wr = ttt.SEG_Writer()
# wr.scale = 3
wr.arc = False
wr.conic = False
wr.cubic = False
wr.conic_biarc_subdivision = 10 # this has no effect?
wr.conic_line_subdivision = 25 # this increases nr of points
wr.cubic_biarc_subdivision = 10 # no effect?
wr.cubic_line_subdivision = 10 # no effect?
wr.scale = float(1) / float(scale)
wr.setFont(3)
# 0 OK freeserif
# 1 OK freeserif bold
# 2 err freeserif italic (has "VX" overlap!)
# 3 OK freeserif bold italic
# 4 OK fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf" );
# 5 err fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeMonoBoldOblique.ttf" ); PPPSolver error?
# 6 err fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeMonoOblique.ttf" ) error?
# 7 OK fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeSans.ttf" );
# 8 err fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeSansBold.ttf" );
# 9 err fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeSansBoldOblique.ttf" );
# 10 err fonts.push_back( "/usr/share/fonts/truetype/freefont/FreeSansOblique.ttf" );
s3 = ttt.ttt(text, wr)
segs = wr.get_segments()
return segs
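# A minimal usage sketch (not executed here; text and offsets are arbitrary):
# generate segments for a string, shift them, and close the polygons before
# inserting them into the VoronoiDiagram as done in __main__ below.
#
#   segs_demo = ttt_segments("OVD", 25000)
#   segs_demo = translate(segs_demo, -0.3, 0.0)
#   segs_demo = modify_segments(segs_demo)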
if __name__ == "__main__":
# w=2500
# h=1500
w = 1600
h = 1024
# w=1024
# h=1024
myscreen = ovdvtk.VTKScreen(width=w, height=h)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
# w2if.Modified()
# lwr.SetFileName("tux1.png")
scale = 1
far = 1
camPos = far
zmult = 3
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0.0, 0, 0)
scale = 25000
segs = ttt_segments("ABCDEFGHIJKLM", scale)
segs2 = ttt_segments("NOPQRSTUVWXYZ", scale)
segs3 = ttt_segments("abcdefghijklm", scale)
# segs3 = ttt_segments( "m", 6400)
segs4 = ttt_segments("nopqrstuvwxyz", scale) # NOPQRSTUVWXYZ", 64000)
segs5 = ttt_segments("0123456789+-*/", scale)
# segs = ttt_segments( "A", 64000)
# segs2 = ttt_segments( "B", 64000)
# segs2=[]
dx = float(50000) / float(scale)
xt = -0.3
segs = translate(segs, xt * dx, 0.05 * dx)
segs = modify_segments(segs)
segs2 = translate(segs2, xt * dx, -0.05 * dx)
segs2 = modify_segments(segs2)
segs3 = translate(segs3, xt * dx, -0.15 * dx)
segs3 = modify_segments(segs3)
segs4 = translate(segs4, xt * dx, -0.22 * dx)
segs4 = modify_segments(segs4)
segs5 = translate(segs5, xt * dx, -0.32 * dx)
segs5 = modify_segments(segs5)
vd = ovd.VoronoiDiagram(far, 400)
print ovd.version()
vod = ovdvtk.VD(myscreen, vd, float(1), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
# vod.textScale = 0.000002
vod.textScale = 0.00005
vod.vertexRadius = 0.0011
vod.drawVertices = 0
vod.drawVertexIndex = 0
vod.drawGenerators = 0
vod.offsetEdges = 0
vod.drawNullEdges = 1
vd.setEdgeOffset(0.00005)
all_segs = segs + segs2 + segs3 + segs4 + segs5
# all_segs=segs
# all_segs=segs3 #+segs4
# all_segs = segs3
times = insert_many_polygons(vd, all_segs)
vd.check()
# ovd.PolygonInterior( True )
# ovd.MedialAxis()
vod.setVDText2(times)
vod.setAll()
# for v in vd.getFaceVertices(14705):
# print " drawing ", v
# vod.drawVertexIdx(v)
err = vd.getStat()
# print err
print "got errorstats for ", len(err), " points"
if len(err) > 1:
minerr = min(err)
maxerr = max(err)
print "min error= ", minerr
print "max error= ", maxerr
print "PYTHON All DONE."
myscreen.render()
# w2if.Modified()
# lwr.SetFileName("{0}.png".format(Nmax))
# lwr.Write()
myscreen.iren.Start()
|
aewallin/openvoronoi
|
python_examples/ttt/ttt_3_alphabet.py
|
Python
|
lgpl-2.1
| 7,151
|
[
"VTK"
] |
08c88dbd4ff4f1d485c76e37bd18d6183ff9548b650c991662aaffc26970ae62
|
#!/usr/bin/env python3
### Made by Ayden G.W for his ICS-201 Class.
### DUE: January 20th, 2017
# NOTE: Import all required packages.
import sys,os,random,time,platform
try:
import pyglet
except ModuleNotFoundError:
print("Run build.py first [Missing Pyglet]!")
sys.exit()
### REVIEW: Enhancements Completed:
### REVIEW: Sounds = True, Find = 'playSound'
### REVIEW: Classes/Objects = True, Find = 'entity'
### REVIEW: Graphics = False Find = ''
### REVIEW: 2-D Lists = True Find = 'cmds'
### NOTE: This game, with some modifications, can be 100% played by an AI, since
### NOTE: every entity class (including the player's) contains a 'useAIToAttack' function.
if platform.system() == "Windows":
curOS = "Windows"
elif platform.system() == "Linux":
curOS = "Linux"
else:
curOS = "?"
curVer = int(sys.version[:1])
if curVer == 2:
print("Python v2 is incompatable with ADF.")
exit()
# NOTE: You can edit the lines below this one.
# NOTE: NOT BELOW THIS.
info = [] # NOTE: Information about surroundings/events. This is a 2D list.
options = [] # NOTE: All options the player can select from at the moment. This is a 2D list.
cmds = [] # NOTE: All custom commands the player can execute. This is a 2D list.
depth = 0 # NOTE: Keeps track of the depth of the game.
continueGame = True # NOTE: Should we keep playing?
curPlyr = "" # NOTE: This value is to be set upon creation of the player with setCurrentPlayer(class). (This is done automatically)
soundsPath = "sounds/" # NOTE: This is the current path to the folder of sounds.
sysSPIsSet = False # NOTE: Is the system's SoundPalette set?
ents = 0
debug = True # NOTE: Should we display messages that are not needed?
superDebug = True # NOTE: Display super-not needed messages?
print("Current OS: "+str(curOS)+" & Python v"+str(curVer) if debug == True else "")
# NOTE: Error levels are as follows:
# NOTE: -1 = Super not needed. For debugging purposes.
# NOTE: 0 = Non critical; merely reporting an action.
# NOTE: 1 = Still non critical, but something went wrong. The script will still function as planned, but a feature may be exempt from execution.
# NOTE: 2 = A value may have been misassigned, therefore a whole function shall be skipped. The script may (or may not) function as planned.
# NOTE: 3 = A critical function is being skipped due to an error, the script will exit after reporting this.
# NOTE: 4 = Critical program-halt error. Something went VERY wrong.
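# NOTE: Hedged usage sketch for the levels above (module tags are hypothetical):
# report("Loaded settings.", "init", 0) # informational
# report("Sound file missing.", "playSound", 1) # non-critical problem
# report("Save file corrupt!", "loader", 3) # critical-level message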
def playSound(sndClass):
if isFile(sndClass.path) == True:
#report("Playing sound '"+str(sndClass.name)+"' ("+str(sndClass.soundFile)+").","playSound",0)
pyglet.media.load(sndClass.path).play()
return True
else:
report("Could not play "+str(sndClass.name)+", because the sound file is not present.","playSound",1)
return False
def playSoundFromPalette(palette,name):
for soundObj in palette.sounds:
if str(soundObj.name) == str(name):
soundObj.play()
def report(msg,module="?",errlvl=0,shouldPlayAudio=1,dbg=debug):
global sysSP
global superDebug
if errlvl < 0 and superDebug == False:
return
#if superDebug == True and errlvl <
if superDebug == True and debug == True and errlvl < 0:
sys.stdout.write(""+str(module)+" : ["+str(errlvl)+"] ; "+str(msg)+"\n")
return
if dbg == True or errlvl > 1:
sys.stdout.write(""+str(module)+" : ["+str(errlvl)+"] ; "+str(msg)+"\n")
if errlvl == 2:
if sysSP.contains("alert") == True and shouldPlayAudio == 1:
playSoundFromPalette(sysSP,"warning")
elif errlvl > 2:
if sysSP.contains("warning") == True and shouldPlayAudio == 1:
playSoundFromPalette(sysSP,"alert")
def isFile(fileName):
return bool(os.path.isfile(str(fileName)))
# NOTE: Class definitions take up 250+ lines.
class nothing(object):
"""docstring for nothing."""
def __init__(self):
super(nothing, self).__init__()
class entity(object):
"""docstring for entity."""
def __init__(self):
super(entity, self).__init__()
self.type = "ToBeDefined"
self.name = "ToBeDefined"
self.energy = 100
self.hp = 200
self.gold = 100
self.xp = 0
self.level = 1
self.inv = []
self.effects = []
self.soundPalette = soundPalette("")
self.xpToLevel = 100
self.eXPNPL = 100 # NOTE: extra XP Needed Per Level
self.levelInc = 1
global ents
ents = ents + 1
report("Initialized a new ent.","entityClass",0)
report("^ ent num: "+str(ents),"entityClass",-1)
def setSoundPalette(self,palette):
self.soundPalette = palette
def getHealth(self):
return self.hp
def setHealth(self,to):
self.hp = round(to)
 def changeHealth(self,by):
  self.hp = round(self.hp + by)
def give(self,item): # NOTE: This can be a list of weapons. Formed: [ItemName,Class,Amount,[flags]] OR [[ItemName.Class,Amount,[flags]],[anotherone]]
if type(item) == list:
if type(item[0]) == list:
for wepLis in item:
self.inv.append(wepLis)
else:
self.inv.append(item)
else:
report("Tried to give an item, but the item != list.","entClass:Give",2)
def getWeapons(self):
retList = []
for thing in self.inv:
if "weapon" in str(thing[3]):
retList.append(thing)
return retList
def getGold(self):
return self.gold
def setGold(self,to):
self.gold = round(to)
def changeGold(self,by):
self.gold = round(self.gold + by)
def getLevel(self):
return self.level
def setLevel(self,to):
self.level = round(to)
def changeLevel(self,by):
self.level = round(self.level + by)
self.hp = self.hp + by * 2
def getXP(self):
return self.xp
def setXP(self,to):
self.xp = round(to)
def changeXP(self,by):
if self.xp + by >= self.xpToLevel:
self.level = self.level + self.levelInc
self.xp = 0
else:
self.xp = round(self.xp + by)
def useAIToAttack(self,tgt,smart=True):
if self.hp < 1:
return False,0,nothing()
weps = self.getWeapons()
if len(weps) < 1:
# NOTE: Make the AI run away.
report("AI tried to attack the player, without any assigned weapons.","entClass:useAIToAttack",2)
return False,0,nothing()
else:
   # NOTE: Attack 'tgt'
   useWepClass = None # NOTE: stays None when every weapon is broken
   for wep in weps:
    wepClass = wep[1]
    if smart == True:
     if wepClass.dmg >= tgt.hp and wepClass.isBroken == False:
      useWepClass = wepClass
    else:
     if wepClass.isBroken == False:
      useWepClass = wepClass
if useWepClass == None:
# NOTE: Make the AI run away.
return False,0,nothing()
# NOTE: Then attack the player with the chosen weapon.
dmgDone = tgt.getHealth()
tgt.changeHealth(-useWepClass.dmg)
dmgDone = dmgDone - tgt.getHealth()
useWepClass.changeUses(-1)
tgt.applyEffect(*useWepClass.effects) # NOTE: The star dumps everything in the effects list into the arg. from [["bleeding"]] to ["bleeding"]
return True,dmgDone,useWepClass
def getEffects(self):
return self.effects
def clearEffects(self):
del self.effects[:]
def remEffect(self,effect):
if effect in self.effects:
   self.effects.remove(effect)
def applyEffect(self,effect=0): # NOTE: Formed: class.applyEffect(["EffectName",effectMultiplier])
if effect == 0:
return
foundActive = False
for eff in self.effects: # NOTE: First, check if the effect is already in current effects.
if effect[0] == eff[0]:
write("That effect is already active. Now is ("+str(eff)+")\n" if debug == True else "")
eff[1] = eff[1] + effect[1] // 2
foundActive = True
if foundActive != True:
self.effects.append(effect)
write("Applied a new affect on "+self.name+". ("+effect[0]+")\n")
class weapon(object): # NOTE: Weapon in inv: [ItemName,Class,Amount,[flags]]
"""docstring for weapon."""
def __init__(self):
super(weapon, self).__init__()
self.type = "weapon"
self.name = "ToBeDefined"
self.dmg = 0
self.dualwieldable = True
self.effects = [] # NOTE: Formed: [[Effect,effectMultiplier]]
self.uses = 50
self.useStr = "ToBeDefined"
self.isBroken = False
def changeUses(self,by):
self.uses = round(self.uses + by)
class knife(weapon):
"""docstring for knife."""
def __init__(self):
super(knife, self).__init__()
self.name = "Knife"
self.dmg = 10
self.effects = [["bleeding",1.75]]
self.uses = 25
self.useStr = "You slash your knife at "
class claws(weapon):
"""docstring for claws."""
def __init__(self):
super(claws, self).__init__()
self.name = "Claws"
self.dmg = 50
self.uses = 30
self.useStr = "You slash your claws at "
class pistol(weapon):
"""docstring for pistol."""
def __init__(self):
super(pistol, self).__init__()
self.name = "Pistol"
self.dmg = 45
self.effects = [["bleeding",3]]
self.uses = random.randrange(30,65)
self.useStr = "You shot your gun at "
class player(entity):
"""docstring for player."""
def __init__(self,name="Player"):
  super(player, self).__init__()
self.type = "player"
self.name = name
class ghoul(entity):
"""docstring for ghoul."""
def __init__(self,name="Ghoul"):
super(ghoul, self).__init__()
self.type = "mob"
self.name = name
self.hp = 60
#self.give(["Claws",claws(),1,["weapon"]])
def use(self,tgt):
tgt.hp = tgt.hp - self.dmg
def getDmg(self):
return self.dmg
def setDmg(self,to):
self.dmg = to
def getUses(self):
return self.uses
def setUses(self,to):
self.uses = to
class sound(object):
"""docstring for sound."""
def __init__(self,soundFile,name):
super(sound, self).__init__()
self.name = name
self.path = str(soundsPath) + str(soundFile)
self.soundFile = str(soundFile) if isFile(self.path) == True else "INVALIDFILE"
if isFile(self.path) != True:
report("Initialized sound with invalid path! ('"+str(self.path)+"')","soundClass",2,0)
def play(self):
try:
playSound(self)
except pyglet.media.riff.WAVEFormatException:
report("Failed to play sound. (AVbin missing?)","soundClass:play",2,0)
def set(self,to):
self.soundFile = str(to)
class soundPalette(object): # NOTE: Sound mixer for a genre of sounds. I.E dmgNoises = mixer().addNoise(sound("grunt.ogg","Grunt")), etc.
"""docstring for soundPalette."""
def __init__(self,name="inv"):
super(soundPalette, self).__init__()
self.name = name if name != "inv" else "noName"
self.sounds = []
if self.name == "noName":
report("Initialized a soundPalette with no valid name.","soundPalette",1)
def add(self,soundObject):
if soundObject not in self.sounds:
self.sounds.append(soundObject)
report("Added sound '"+str(soundObject.path)+"' to '"+str(self.name)+"'.","soundPalette",0)
def rem(self,soundObject):
  if soundObject in self.sounds and soundObject.name in self.getSoundNames() and type(soundObject) != str:
self.sounds.remove(soundObject)
def getSounds(self):
return self.sounds
def getSoundNames(self):
retList = []
for soundObj in self.sounds:
retList.append(soundObj.name)
return retList
def getPos(self,pos):
if len(self.sounds) >= pos:
return self.sounds[pos]
else:
return False
def contains(self,soundName):
if type(soundName) == list:
#report("soundName is List.","SPClass:contains",-1)
retList = []
snds = self.getSoundNames()
for nam in soundName:
if nam not in snds:
retList.append(nam)
if len(retList) == 0:
return True,[]
else:
return False,retList
elif type(soundName) == str:
#report("soundName is Str.","SPClass:contains",-1)
for soundObj in self.sounds:
if soundObj.name == str(soundName):
#report("returning True,[""]","SPClass:contains",-1)
return True
#report("returning False,"+str([soundName]),"SPClass:contains",-1)
return False
def play(self,soundName):
for soundObj in self.sounds:
if str(soundObj.name) == str(soundName):
soundObj.play()
# NOTE: Sound palettes (there *should be every sound with the fowllowing names in every entity.)
# NOTE: System sound palette:
# NOTE: selection
# NOTE: alert
# NOTE: warning
# NOTE: invalidInput
# NOTE: Init def. sys sound palette
sysSP = soundPalette("sysSP")
sysSP.add(sound("default/selection.ogg","selection"))
sysSP.add(sound("default/alert.ogg","alert"))
sysSP.add(sound("default/warning.ogg","warning"))
sysSP.add(sound("default/invalidInput.ogg","invalidInput"))
sysSP.add(sound("default/invalidInput.ogg","init"))
# NOTE: Entity sound palette:
# NOTE: hurt
# NOTE: healed
# NOTE: killed
# NOTE: Init def. player sound palette
def_plyrSP = soundPalette("plyrSP")
# def_plyrSP.add(sound("default/hurt.ogg","hurt"))
# def_plyrSP.add(sound("default/healed.ogg","healed"))
# def_plyrSP.add(sound("default/killed.ogg","killed"))
# NOTE: Combat Announcer sound palette:
# NOTE: beginCombat
# NOTE: announcerWin
# NOTE: announcerLoss
def setSysSP(SP):
 global sysSPIsSet, sysSP
sysSPIsSet = True
sysSP = SP
cont,exc = SP.contains(["selection","alert","warning","invalidInput","fightWon"])
if cont == False:
report("Missing ("+str(exc)+") entry(s) in sysSoundPalette. They will be exempt from playback.","setSysSP",1)
else:
report("Set sysSP; No entrys missing.","setSysSP",0)
dd,ff = SP.contains(["init"])
if dd == True:
SP.play("init")
setSysSP(sysSP)
def write(stuff): # NOTE: Supports printing lists.
if stuff == "" or stuff == None:
return
 if type(stuff) == list:
for thing in stuff:
write(str(thing)+"\n")
else:
sys.stdout.write(str(stuff))
def cleanup(): # NOTE: DEPRECATED
remAllInfo()
remAllOptions() # NOTE: Cleanup stuff.
#clearScreen()
def getAllCmds():
allCmds = []
for cmd in cmds:
for obj in cmd:
if callable(obj) == False and type(obj) == str:
allCmds.append(obj)
return allCmds
def getInput(query=""): # This is for me, because Linux HMe-5.6 Does not support Python 3.x
validInput = False
tst = "pass"
write(str(query)+"\n> ")
sys.stdout.flush()
if platform.system() == "Linux":
while validInput == False:
try:
#tst = raw_input()
tst = input()
except KeyboardInterrupt:
write("\n\n\n\nGot Keyboard interrupt. Closing.\n\n")
exit()
if tst != None and tst != "":
validInput = True
elif platform.system() == "Windows":
while validInput == False:
try:
tst = input()
except KeyboardInterrupt:
write("\n\n\n\nGot Keyboard interrupt. Closing.\n\n")
exit()
if tst != None and tst != "":
validInput = True
 if str(tst).lstrip("-").isdigit() and int(tst) in range(-1,len(options)+1):
#print("Input was registered as an int.\n" if debug == True else "")
return int(tst)
else:
#print("Input was registered as an str.\n" if debug == True else "")
return str(tst)
def clearScreen(): # NOTE: This clears the screen. Supports Windows, and Linux, NOT WING IDE.
if debug == False:
if curOS == "Linux":
os.system("clear")
plyr = player("ToBeDefined")
def timedBusyWait(tim):
waitTime = time.time() + tim
beforeTime = time.time()
try: # NOTE: Use try, so that if the user CTRL+C's the script, it won't vomit errors everywhere.
while time.time() < waitTime: # NOTE: Use < instead of != to solve endless looping in case of cpu lag during the busy wait.
pass
except KeyboardInterrupt:
exit()
def randTimedWait(min,max):
timedBusyWait(round(random.randrange(min,max)))
# NOTE: Info block:
def addInfo(inf): # NOTE: This adds info, and can even be a list of info!
if type(inf) is list:
for information in inf:
info.append(information)
else:
info.append(inf)
def remAllInfo():
global info
#for thing in info:
#info.remove(thing)
del info[:]
def printInfo():
pos = 0
for element in info:
pos = pos + 1
write(str(pos)+" * "+str(element)+"\n" if debug == True else str(element)+"\n")
write("\n")
# NOTE: Option block
def addOption(option): # NOTE: This adds an option, and can even be a list of options!
if type(option) is list:
for opt in option:
options.append(opt)
else:
options.append(option)
def remAllOptions():
global options
#for thing in options:
#options.remove(thing)
del options[:]
def printOptions():
recursion = 0
for option in options:
recursion = recursion + 1
write(str(recursion)+") "+str(option)+"\n")
# NOTE: Utilities
def initLevel(inf,opt): # NOTE: DEPRECATED
addInfo(inf)
addOption(opt)
printInfo()
printOptions()
return getInput()
# NOTE: Custom user commands
def addCommand(cmd): # NOTE: 'cmd' is supposed to be a list consisting of [cmdStr,cmdCallback]
if type(cmd) == list:
cmds.append(cmd)
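# NOTE: Hedged usage sketch (these bindings are assumptions, not original wiring):
# addCommand(["help", printHelp])
# addCommand(["inv", printInventory])
# addCommand(["use", use])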
def resolveInput(inp):
global options
 if str(inp).lstrip("-").isdigit() and int(inp) in range(-1,len(options)):
report("Returned choice selection. ['choice',"+str(inp)+"]","resolveInput",0)
return "choice",inp
if type(inp) == str: # NOTE: Make sure the user's input was actually a string, and not a choice selection.
for cmd in cmds: # NOTE: Iterate through list of commands
if cmd[0] in inp: # NOTE: Check if user input contains the command. (This is so we can allow function arguments.)
args = [] # NOTE: Make Args list to use as the only arg for the function call.
argsStr = inp.replace(cmd[0],"")# NOTE: Replace the actual CMD call with empty space to leave only the arg(s).
if argsStr[:1] == " ":
argsStr = argsStr[1:] # NOTE: The above operation will leave an empty space at the beginning of argsStr, so remove it.
args = argsStr.split(" ")
report("User called '"+cmd[0]+"', with args '"+str(args)+"'.\n","resolveInput",0)
for obj in cmd: # NOTE: Iterate through the command's list. This allows us to have multiple functions per list.
if callable(obj) == True: # NOTE: Then we know it is a function.
obj([argsStr]) # NOTE: Then call it with the available arg(s)
return "cmd",inp
else:
#write("'"+inp+"' != '"+cmd[0]+"'\n","resolveInput",0)
pass
return "cmd",inp
def callCommand(commandStr,argsList):
for cmd in cmds:
if commandStr in cmd[0]:
for obj in cmd:
if callable(obj) == True:
obj(argsList)
report("Called "+str(cmd[0])+" with args "+str(argsList),"callCommand",0)
return
# NOTE: Combat system
def printMoves(plyr): # NOTE: Print all available attacks
weps = plyr.getWeapons()
wepNamesList = []
recursion = 0
for wep in weps:
wepNamesList.append(wep[0])
if len(wepNamesList) == 0:
return False,[]
for wep in weps:
recursion = recursion + 1
wepClass = wep[1]
write(str(recursion)+") "+str(wepNamesList[recursion-1])+" (Damage: "+str(wepClass.dmg)+", Uses left: "+str(wepClass.uses)+")\n")
return True,weps
def enterCombat(plyr,mob,palette,mod=1): # NOTE: 'mod' is the gold multiplier.
write("Entering combat...\n\n")
isInCombat = True
timedBusyWait(random.randrange(3,5))
write("A "+mob.name+" has appeared! His level is "+str(mob.level)+". Fight!\n")
while isInCombat == True:
timedBusyWait(random.randrange(3,5))
#clearScreen()
write("\nYour Health: "+str(plyr.hp)+"\nEnemy Health: "+str(mob.hp)+"\n\n")
hasWeps,l = printMoves(plyr)
if hasWeps == False:
write("You have no weapons! Running away. (You should visit the shop)\n")
write("Running...\n\n")
timedBusyWait(1.5)
chance = random.randrange(0,100)
if chance < 30:
write("The "+mob.name+" caught your leg when you tried to run.\nYou kicked him off, but you messed up your knee pretty bad.\n\n")
plyr.changeHealth(-chance/1.5)
write("Your Health: "+str(plyr.hp)+"\n")
else:
write("You got away.\nYour Health: "+str(plyr.hp)+"\n")
print("ChanceVar = "+str(chance) if debug == True else "")
return False,"ran"
xpGain = 5 * plyr.getLevel()
if mob.hp < 1:
isInCombat = False
goldGain = mob.getGold() * mod
xpGain = xpGain + 25 * (plyr.getXP() // 2 if plyr.getXP() > 2 else 2 // 2)
plyr.changeGold(goldGain)
plyr.changeXP(xpGain)
palette.play("fightWon")
write("\n\nYou won the battle!\nYou gained "+str(round(goldGain))+" Gold & "+str(round(xpGain))+" XP. \n\n")
timedBusyWait(random.randrange(3,5))
return True,"won"
sel = getInput()
resolved,choice = resolveInput(sel)
  if str(choice).lstrip("-").isdigit() and int(choice) in range(-1,len(plyr.getWeapons())): # NOTE: Then we know the choice is valid. Follow through with using the weapon.
wep = plyr.getWeapons()[int(choice)-1]
wepClass = wep[1]
write(wepClass.useStr+"him.\n")
mob.changeHealth(-wepClass.dmg)
wepClass.uses = wepClass.uses - 1
# NOTE: Now let the AI respond by attacking the player if it can.
attkd,dmgDone,wepUsed = mob.useAIToAttack(plyr,True if plyr.getLevel() > 5 else False)
if attkd == True:
write("The "+mob.name+" attacked you, and did "+str(dmgDone)+" damage.\n")
else:
write("The "+mob.name+" did not attack you.\n")
if plyr.getHealth() < 1:
write("\n\n\n\n YOU DIED. GAME OVER.\n\n\n\n")
exit()
# NOTE: Load bundled commands.
def printHelp(args=[]):
recursion = 0
allCmds = ""
write("\nPrinting all commands.\n")
for cmd in cmds:
recursion = recursion + 1
allCmds = allCmds + "\nCMD #" + str(recursion) + " : " + str(cmd[0] + "\n")
write(allCmds+"\n")
timedBusyWait(1)
def printInventory(args=[]):
global plyr
allThings = ""
for thing in plyr.inv:
if thing[1] == object:
allThings = allThings + str(thing[0]) + "\n"
else:
allThings = allThings + str(thing[0]) + " (" + str(thing[1]) + ")\n"
write(allThings)
timedBusyWait(1)
def use(args=[]):
global plyr
argsStr = ""
for arg in args:
argsStr = argsStr + arg
write("Used. Args = "+argsStr+"\n" if debug == True else "")
if len(args) > 0:
  for obj in plyr.inv:
pass
write("Loaded Core.\n\n" if debug == True else "")
|
MTadder/A-Dark-Forest
|
ADF.py
|
Python
|
mit
| 25,115
|
[
"ADF",
"VisIt"
] |
8ea6b1986c18e4ac73483c9c9c30bcccef3283bc07a67e18a7044aec52102182
|
"""
Test functions for stats module
"""
import warnings
import re
import sys
import pickle
from pathlib import Path
import os
import json
import platform
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import numpy
import numpy as np
from numpy import typecodes, array
from numpy.lib.recfunctions import rec_append_fields
from scipy import special
from scipy._lib._util import check_random_state
from scipy.integrate import (IntegrationWarning, quad, trapezoid,
cumulative_trapezoid)
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy, polygamma, entr
from scipy.stats._distr_params import distcont, invdistcont
from .test_discrete_basic import distdiscrete, invdistdiscrete
from scipy.stats._continuous_distns import FitDataError, _argus_phi
from scipy.optimize import root, fmin
from itertools import product
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Failing on macOS 11, Intel CPUs. See gh-14901
MACOS_INTEL = (sys.platform == 'darwin') and (platform.machine() == 'x86_64')
# distributions to skip while testing the fix for the support method
# introduced in gh-13294. These distributions are skipped as they
# always return a non-nan support for every parametrization.
skip_test_support_gh13294_regression = ['tukeylambda', 'pearson3']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
def check_vonmises_pdf_periodic(k, L, s, x):
vm = stats.vonmises(k, loc=L, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, L, s, x):
vm = stats.vonmises(k, loc=L, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_distributions_submodule():
actual = set(scipy.stats.distributions.__all__)
continuous = [dist[0] for dist in distcont] # continuous dist names
discrete = [dist[0] for dist in distdiscrete] # discrete dist names
other = ['rv_discrete', 'rv_continuous', 'rv_histogram',
'entropy', 'trapz']
expected = continuous + discrete + other
# need to remove, e.g.,
# <scipy.stats._continuous_distns.trapezoid_gen at 0x1df83bbc688>
expected = set(filter(lambda s: not str(s).startswith('<'), expected))
assert actual == expected
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
check_vonmises_pdf_periodic(k, 0, 1, x)
check_vonmises_pdf_periodic(k, 1, 1, x)
check_vonmises_pdf_periodic(k, 0, 10, x)
check_vonmises_cdf_periodic(k, 0, 1, x)
check_vonmises_cdf_periodic(k, 1, 1, x)
check_vonmises_cdf_periodic(k, 0, 10, x)
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
# Expected values of the vonmises PDF were computed using
# mpmath with 50 digits of precision:
#
# def vmpdf_mp(x, kappa):
# x = mpmath.mpf(x)
# kappa = mpmath.mpf(kappa)
# num = mpmath.exp(kappa*mpmath.cos(x))
# den = 2 * mpmath.pi * mpmath.besseli(0, kappa)
# return num/den
#
@pytest.mark.parametrize('x, kappa, expected_pdf',
[(0.1, 0.01, 0.16074242744907072),
(0.1, 25.0, 1.7515464099118245),
(0.1, 800, 0.2073272544458798),
(2.0, 0.01, 0.15849003875385817),
(2.0, 25.0, 8.356882934278192e-16),
(2.0, 800, 0.0)])
def test_vonmises_pdf(x, kappa, expected_pdf):
pdf = stats.vonmises.pdf(x, kappa)
assert_allclose(pdf, expected_pdf, rtol=1e-15)
def _assert_less_or_close_loglike(dist, data, func, **kwds):
"""
This utility function checks that the log-likelihood (computed by
func) of the result computed using dist.fit() is less than or equal
to the result computed using the generic fit method. Because of
normal numerical imprecision, the "equality" check is made using
`np.allclose` with a relative tolerance of 1e-15.
"""
mle_analytical = dist.fit(data, **kwds)
numerical_opt = super(type(dist), dist).fit(data, **kwds)
ll_mle_analytical = func(mle_analytical, data)
ll_numerical_opt = func(numerical_opt, data)
assert (ll_mle_analytical <= ll_numerical_opt or
np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15))
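# A hedged usage sketch (not an existing test): compare the analytical expon fit
# against the generic optimizer fit via the distribution's built-in negative
# log-likelihood; the sample itself is arbitrary.
#
#     data = stats.expon.rvs(size=100, random_state=1234)
#     _assert_less_or_close_loglike(stats.expon, data,
#                                   lambda params, d: stats.expon.nnlf(params, d))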
def assert_fit_warnings(dist):
param = ['floc', 'fscale']
if dist.shapes:
nshapes = len(dist.shapes.split(","))
param += ['f0', 'f1', 'f2'][:nshapes]
all_fixed = dict(zip(param, np.arange(len(param))))
data = [1, 2, 3]
with pytest.raises(RuntimeError,
match="All parameters fixed. There is nothing "
"to optimize."):
dist.fit(data, **all_fixed)
with pytest.raises(RuntimeError,
match="The data contains non-finite values"):
dist.fit([np.nan])
with pytest.raises(RuntimeError,
match="The data contains non-finite values"):
dist.fit([np.inf])
with pytest.raises(TypeError, match="Unknown keyword arguments:"):
dist.fit(data, extra_keyword=2)
with pytest.raises(TypeError, match="Too many positional arguments."):
dist.fit(data, *[1]*(len(param) - 1))
@pytest.mark.parametrize('dist',
['alpha', 'betaprime',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
"""gh-6235"""
dct = dict(distcont)
args = dct[dist]
dist = getattr(stats, dist)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
class TestRandInt:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = np.linspace(0, 36, 100)
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
class TestBinom:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
  # no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestArcsine:
def test_endpoints(self):
# Regression test for gh-13697. The following calculation
# should not generate a warning.
p = stats.arcsine.pdf([0, 1])
assert_equal(p, [np.inf, np.inf])
class TestBernoulli:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
class TestBradford:
# gh-6216
def test_cdf_ppf(self):
c = 0.1
x = np.logspace(-20, -4)
q = stats.bradford.cdf(x, c)
xx = stats.bradford.ppf(q, c)
assert_allclose(x, xx)
class TestChi:
# "Exact" value of chi.sf(10, 4), as computed by Wolfram Alpha with
# 1 - CDF[ChiDistribution[4], 10]
CHI_SF_10_4 = 9.83662422461598e-21
# "Exact" value of chi.mean(df=1000) as computed by Wolfram Alpha with
# Mean[ChiDistribution[1000]]
CHI_MEAN_1000 = 31.614871896980
def test_sf(self):
s = stats.chi.sf(10, 4)
assert_allclose(s, self.CHI_SF_10_4, rtol=1e-15)
def test_isf(self):
x = stats.chi.isf(self.CHI_SF_10_4, 4)
assert_allclose(x, 10, rtol=1e-15)
def test_mean(self):
x = stats.chi.mean(df=1000)
assert_allclose(x, self.CHI_MEAN_1000, rtol=1e-12)
class TestNBinom:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestGenInvGauss:
def setup_method(self):
np.random.seed(1234)
@pytest.mark.slow
def test_rvs_with_mode_shift(self):
# ratio_unif w/ mode shift
gig = stats.geninvgauss(2.3, 1.5)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_without_mode_shift(self):
# ratio_unif w/o mode shift
gig = stats.geninvgauss(0.9, 0.75)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_new_method(self):
# new algorithm of Hoermann / Leydold
gig = stats.geninvgauss(0.1, 0.2)
_, p = stats.kstest(gig.rvs(size=1500, random_state=1234), gig.cdf)
assert_equal(p > 0.05, True)
@pytest.mark.slow
def test_rvs_p_zero(self):
def my_ks_check(p, b):
gig = stats.geninvgauss(p, b)
rvs = gig.rvs(size=1500, random_state=1234)
return stats.kstest(rvs, gig.cdf)[1] > 0.05
# boundary cases when p = 0
assert_equal(my_ks_check(0, 0.2), True) # new algo
assert_equal(my_ks_check(0, 0.9), True) # ratio_unif w/o shift
assert_equal(my_ks_check(0, 1.5), True) # ratio_unif with shift
def test_rvs_negative_p(self):
# if p negative, return inverse
assert_equal(
stats.geninvgauss(-1.5, 2).rvs(size=10, random_state=1234),
1 / stats.geninvgauss(1.5, 2).rvs(size=10, random_state=1234))
def test_invgauss(self):
# test that invgauss is special case
ig = stats.geninvgauss.rvs(size=1500, p=-0.5, b=1, random_state=1234)
assert_equal(stats.kstest(ig, 'invgauss', args=[1])[1] > 0.15, True)
# test pdf and cdf
mu, x = 100, np.linspace(0.01, 1, 10)
pdf_ig = stats.geninvgauss.pdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(pdf_ig, stats.invgauss(mu).pdf(x))
cdf_ig = stats.geninvgauss.cdf(x, p=-0.5, b=1 / mu, scale=mu)
assert_allclose(cdf_ig, stats.invgauss(mu).cdf(x))
def test_pdf_R(self):
# test against R package GIGrvg
# x <- seq(0.01, 5, length.out = 10)
# GIGrvg::dgig(x, 0.5, 1, 1)
vals_R = np.array([2.081176820e-21, 4.488660034e-01, 3.747774338e-01,
2.693297528e-01, 1.905637275e-01, 1.351476913e-01,
9.636538981e-02, 6.909040154e-02, 4.978006801e-02,
3.602084467e-02])
x = np.linspace(0.01, 5, 10)
assert_allclose(vals_R, stats.geninvgauss.pdf(x, 0.5, 1))
def test_pdf_zero(self):
# pdf at 0 is 0, needs special treatment to avoid 1/x in pdf
assert_equal(stats.geninvgauss.pdf(0, 0.5, 0.5), 0)
# if x is large and p is moderate, make sure that pdf does not
# overflow because of x**(p-1); exp(-b*x) forces pdf to zero
assert_equal(stats.geninvgauss.pdf(2e6, 50, 2), 0)
class TestGenHyperbolic:
def setup_method(self):
np.random.seed(1234)
def test_pdf_r(self):
# test against R package GeneralizedHyperbolic
# x <- seq(-10, 10, length.out = 10)
# GeneralizedHyperbolic::dghyp(
# x = x, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
# )
vals_R = np.array([
2.94895678275316e-13, 1.75746848647696e-10, 9.48149804073045e-08,
4.17862521692026e-05, 0.0103947630463822, 0.240864958986839,
0.162833527161649, 0.0374609592899472, 0.00634894847327781,
0.000941920705790324
])
lmbda, alpha, beta = 2, 2, 1
mu, delta = 0.5, 1.5
args = (lmbda, alpha*delta, beta*delta)
gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
x = np.linspace(-10, 10, 10)
assert_allclose(gh.pdf(x), vals_R, atol=0, rtol=1e-13)
def test_cdf_r(self):
# test against R package GeneralizedHyperbolic
# q <- seq(-10, 10, length.out = 10)
# GeneralizedHyperbolic::pghyp(
# q = q, lambda = 2, alpha = 2, beta = 1, delta = 1.5, mu = 0.5
# )
vals_R = np.array([
1.01881590921421e-13, 6.13697274983578e-11, 3.37504977637992e-08,
1.55258698166181e-05, 0.00447005453832497, 0.228935323956347,
0.755759458895243, 0.953061062884484, 0.992598013917513,
0.998942646586662
])
lmbda, alpha, beta = 2, 2, 1
mu, delta = 0.5, 1.5
args = (lmbda, alpha*delta, beta*delta)
gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
x = np.linspace(-10, 10, 10)
assert_allclose(gh.cdf(x), vals_R, atol=0, rtol=1e-6)
def test_moments_r(self):
# test against R package GeneralizedHyperbolic
# sapply(1:4,
# function(x) GeneralizedHyperbolic::ghypMom(
# order = x, lambda = 2, alpha = 2,
# beta = 1, delta = 1.5, mu = 0.5,
# momType = 'raw')
# )
vals_R = [2.36848366948115, 8.4739346779246,
37.8870502710066, 205.76608511485]
lmbda, alpha, beta = 2, 2, 1
mu, delta = 0.5, 1.5
args = (lmbda, alpha*delta, beta*delta)
vals_us = [
stats.genhyperbolic(*args, loc=mu, scale=delta).moment(i)
for i in range(1, 5)
]
assert_allclose(vals_us, vals_R, atol=0, rtol=1e-13)
def test_rvs(self):
        # Kolmogorov-Smirnov test to ensure alignment
# of analytical and empirical cdfs
lmbda, alpha, beta = 2, 2, 1
mu, delta = 0.5, 1.5
args = (lmbda, alpha*delta, beta*delta)
gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
_, p = stats.kstest(gh.rvs(size=1500, random_state=1234), gh.cdf)
assert_equal(p > 0.05, True)
def test_pdf_t(self):
# Test Against T-Student with 1 - 30 df
df = np.linspace(1, 30, 10)
        # in principle alpha should be zero; in practice, for big lmbdas,
        # alpha cannot be too small else the pdf does not integrate
alpha, beta = np.float_power(df, 2)*np.finfo(np.float32).eps, 0
mu, delta = 0, np.sqrt(df)
args = (-df/2, alpha, beta)
gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
assert_allclose(
gh.pdf(x), stats.t.pdf(x, df),
atol=0, rtol=1e-6
)
def test_pdf_cauchy(self):
# Test Against Cauchy distribution
        # in principle alpha should be zero; in practice, for big lmbdas,
        # alpha cannot be too small else the pdf does not integrate
lmbda, alpha, beta = -0.5, np.finfo(np.float32).eps, 0
mu, delta = 0, 1
args = (lmbda, alpha, beta)
gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
assert_allclose(
gh.pdf(x), stats.cauchy.pdf(x),
atol=0, rtol=1e-6
)
def test_pdf_laplace(self):
# Test Against Laplace with location param [-10, 10]
loc = np.linspace(-10, 10, 10)
        # in principle delta should be zero; in practice, for big loc, delta
        # cannot be too small else the pdf does not integrate
delta = np.finfo(np.float32).eps
lmbda, alpha, beta = 1, 1, 0
args = (lmbda, alpha*delta, beta*delta)
        # the ppf is not reliable for scale < 5e-4, therefore a simple
        # linspace is used to define the support
gh = stats.genhyperbolic(*args, loc=loc, scale=delta)
x = np.linspace(-20, 20, 50)[:, np.newaxis]
assert_allclose(
gh.pdf(x), stats.laplace.pdf(x, loc=loc, scale=1),
atol=0, rtol=1e-11
)
def test_pdf_norminvgauss(self):
# Test Against NIG with varying alpha/beta/delta/mu
alpha, beta, delta, mu = (
np.linspace(1, 20, 10),
np.linspace(0, 19, 10)*np.float_power(-1, range(10)),
np.linspace(1, 1, 10),
np.linspace(-100, 100, 10)
)
lmbda = - 0.5
args = (lmbda, alpha * delta, beta * delta)
gh = stats.genhyperbolic(*args, loc=mu, scale=delta)
x = np.linspace(gh.ppf(0.01), gh.ppf(0.99), 50)[:, np.newaxis]
assert_allclose(
gh.pdf(x), stats.norminvgauss.pdf(
x, a=alpha, b=beta, loc=mu, scale=delta),
atol=0, rtol=1e-13
)
class TestNormInvGauss:
def setup_method(self):
np.random.seed(1234)
def test_cdf_R(self):
# test pdf and cdf vals against R
# require("GeneralizedHyperbolic")
# x_test <- c(-7, -5, 0, 8, 15)
# r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5)
# r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5)
r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01,
9.988650664e-01, 9.999848769e-01])
x_test = np.array([-7, -5, 0, 8, 15])
vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5)
assert_allclose(vals_cdf, r_cdf, atol=1e-9)
def test_pdf_R(self):
# values from R as defined in test_cdf_R
r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01,
7.450485342e-04, 8.917889931e-06])
x_test = np.array([-7, -5, 0, 8, 15])
vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5)
assert_allclose(vals_pdf, r_pdf, atol=1e-9)
def test_stats(self):
a, b = 1, 0.5
gamma = np.sqrt(a**2 - b**2)
v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)),
3.0 * (1 + 4 * b**2 / a**2) / gamma)
assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk'))
def test_ppf(self):
a, b = 1, 0.5
x_test = np.array([0.001, 0.5, 0.999])
vals = stats.norminvgauss.ppf(x_test, a, b)
assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b))
class TestGeom:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
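        # geom.pmf(k, p) = (1 - p)**(k - 1) * p for k = 1, 2, ...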
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
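        # geom.cdf(k, p) = 1 - (1 - p)**k, so for p = 0.5: 0.5, 0.75, 0.875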
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
def test_ppf_underflow(self):
# this should not underflow
assert_allclose(stats.geom.ppf(1e-20, 1e-20), 1.0, atol=1e-14)
class TestPlanck:
def setup_method(self):
np.random.seed(1234)
def test_sf(self):
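        # planck.sf(k, lambda_) = exp(-lambda_*(k + 1)); for lambda_ = 5
        # these are exp(-10), exp(-15) and exp(-20)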
vals = stats.planck.sf([1, 2, 3], 5.)
expected = array([4.5399929762484854e-05,
3.0590232050182579e-07,
2.0611536224385579e-09])
assert_array_almost_equal(vals, expected)
def test_logsf(self):
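        # on the log scale, logsf(k, lambda_) = -lambda_*(k + 1), which
        # remains finite even when sf itself underflows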
vals = stats.planck.logsf([1000., 2000., 3000.], 1000.)
expected = array([-1001000., -2001000., -3001000.])
assert_array_almost_equal(vals, expected)
class TestGennorm:
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
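        # gennorm.pdf(x, 2) = exp(-x**2)/sqrt(pi), i.e. a normal density
        # with sigma = 1/sqrt(2), hence scale=2**-.5 below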
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm:
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
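        # halfgennorm is gennorm folded onto [0, inf), so its pdf is twice
        # the gennorm pdf for the same shape parameter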
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestLaplaceasymmetric:
def test_laplace(self):
# test against Laplace (special case for kappa=1)
points = np.array([1, 2, 3])
pdf1 = stats.laplace_asymmetric.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_allclose(pdf1, pdf2)
def test_asymmetric_laplace_pdf(self):
        # test asymmetric Laplace
points = np.array([1, 2, 3])
kappa = 2
kapinv = 1/kappa
pdf1 = stats.laplace_asymmetric.pdf(points, kappa)
pdf2 = stats.laplace_asymmetric.pdf(points*(kappa**2), kapinv)
assert_allclose(pdf1, pdf2)
def test_asymmetric_laplace_log_10_16(self):
        # test asymmetric Laplace
points = np.array([-np.log(16), np.log(10)])
kappa = 2
pdf1 = stats.laplace_asymmetric.pdf(points, kappa)
cdf1 = stats.laplace_asymmetric.cdf(points, kappa)
sf1 = stats.laplace_asymmetric.sf(points, kappa)
pdf2 = np.array([1/10, 1/250])
cdf2 = np.array([1/5, 1 - 1/500])
sf2 = np.array([4/5, 1/500])
ppf1 = stats.laplace_asymmetric.ppf(cdf2, kappa)
ppf2 = points
isf1 = stats.laplace_asymmetric.isf(sf2, kappa)
isf2 = points
assert_allclose(np.concatenate((pdf1, cdf1, sf1, ppf1, isf1)),
np.concatenate((pdf2, cdf2, sf2, ppf2, isf2)))
class TestTruncnorm:
def setup_method(self):
np.random.seed(1234)
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that used to fail because of extreme tailness.
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low <= x.min() <= x.max() <= high, str([low, high, x]))
# Check some additional extreme tails
low, high = 1000, 1001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
low, high = 10000, 10001
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
low, high = -10001, -10000
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_9403_nontail_values(self):
for low, high in [[3, 4], [-4, -3]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.3619772, 0.1015229, 0])
if low < 0:
expected_pdfs = np.array([0, 0.1015229, 3.3619772, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
low + 0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*3.1984741, high])
assert_almost_equal(ppfs, expected_ppfs)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
0.8475544278436675)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
0.1524455721563326)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
0.8475544278436675)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
0.1524455721563326)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
def test_gh_9403_medium_tail_values(self):
for low, high in [[39, 40], [-40, -39]]:
xvals = np.array([-np.inf, low, high, np.inf])
xmid = (high+low)/2.0
cdfs = stats.truncnorm.cdf(xvals, low, high)
sfs = stats.truncnorm.sf(xvals, low, high)
pdfs = stats.truncnorm.pdf(xvals, low, high)
expected_cdfs = np.array([0, 0, 1, 1])
expected_sfs = np.array([1.0, 1.0, 0.0, 0.0])
expected_pdfs = np.array([0, 3.90256074e+01, 2.73349092e-16, 0])
if low < 0:
expected_pdfs = np.array([0, 2.73349092e-16,
3.90256074e+01, 0])
assert_almost_equal(cdfs, expected_cdfs)
assert_almost_equal(sfs, expected_sfs)
assert_almost_equal(pdfs, expected_pdfs)
assert_almost_equal(np.log(expected_pdfs[1]/expected_pdfs[2]),
low + 0.5)
pvals = np.array([0, 0.5, 1.0])
ppfs = stats.truncnorm.ppf(pvals, low, high)
expected_ppfs = np.array([low, np.sign(low)*39.01775731, high])
assert_almost_equal(ppfs, expected_ppfs)
cdfs = stats.truncnorm.cdf(ppfs, low, high)
assert_almost_equal(cdfs, pvals)
if low < 0:
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
0.9999999970389126)
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
2.961048103554866e-09)
else:
assert_almost_equal(stats.truncnorm.cdf(xmid, low, high),
0.9999999970389126)
assert_almost_equal(stats.truncnorm.sf(xmid, low, high),
2.961048103554866e-09)
pdf = stats.truncnorm.pdf(xmid, low, high)
assert_almost_equal(np.log(pdf/expected_pdfs[2]), (xmid+0.25)/2)
xvals = np.linspace(low, high, 11)
xvals2 = -xvals[::-1]
assert_almost_equal(stats.truncnorm.cdf(xvals, low, high),
stats.truncnorm.sf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.sf(xvals, low, high),
stats.truncnorm.cdf(xvals2, -high, -low)[::-1])
assert_almost_equal(stats.truncnorm.pdf(xvals, low, high),
stats.truncnorm.pdf(xvals2, -high, -low)[::-1])
def _test_moments_one_range(self, a, b, expected, decimal_s=7):
m0, v0, s0, k0 = expected[:4]
m, v, s, k = stats.truncnorm.stats(a, b, moments='mvsk')
assert_almost_equal(m, m0)
assert_almost_equal(v, v0)
assert_almost_equal(s, s0, decimal=decimal_s)
assert_almost_equal(k, k0)
@pytest.mark.xfail_on_32bit("reduced accuracy with 32bit platforms.")
def test_moments(self):
# Values validated by changing TRUNCNORM_TAIL_X so as to evaluate
# using both the _norm_XXX() and _norm_logXXX() functions, and by
        # removing the _stats and _munp methods in truncnorm to force
# numerical quadrature.
# For m,v,s,k expect k to have the largest error as it is
# constructed from powers of lower moments
self._test_moments_one_range(-30, 30, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-10, 10, [0, 1, 0.0, 0.0])
self._test_moments_one_range(-3, 3, [0.0, 0.9733369246625415,
0.0, -0.1711144363977444])
self._test_moments_one_range(-2, 2, [0.0, 0.7737413035499232,
0.0, -0.6344632828703505])
self._test_moments_one_range(0, np.inf, [0.7978845608028654,
0.3633802276324186,
0.9952717464311565,
0.8691773036059725])
self._test_moments_one_range(-np.inf, 0, [-0.7978845608028654,
0.3633802276324186,
-0.9952717464311565,
0.8691773036059725])
self._test_moments_one_range(-1, 3, [0.2827861107271540,
0.6161417353578292,
0.5393018494027878,
-0.2058206513527461])
self._test_moments_one_range(-3, 1, [-0.2827861107271540,
0.6161417353578292,
-0.5393018494027878,
-0.2058206513527461])
self._test_moments_one_range(-10, -9, [-9.1084562880124764,
0.0114488058210104,
-1.8985607337519652,
5.0733457094223553])
self._test_moments_one_range(-20, -19, [-19.0523439459766628,
0.0027250730180314,
-1.9838694022629291,
5.8717850028287586])
self._test_moments_one_range(-30, -29, [-29.0344012377394698,
0.0011806603928891,
-1.9930304534611458,
5.8854062968996566],
decimal_s=6)
self._test_moments_one_range(-40, -39, [-39.0256074199326264,
0.0006548826719649,
-1.9963146354109957,
5.6167758371700494])
self._test_moments_one_range(39, 40, [39.0256074199326264,
0.0006548826719649,
1.9963146354109957,
5.6167758371700494])
def test_9902_moments(self):
m, v = stats.truncnorm.stats(0, np.inf, moments='mv')
assert_almost_equal(m, 0.79788456)
assert_almost_equal(v, 0.36338023)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_11299_rvs(self):
# Arose from investigating gh-11299
# Test multiple shape parameters simultaneously.
low = [-10, 10, -np.inf, -5, -np.inf, -np.inf, -45, -45, 40, -10, 40]
high = [-5, 11, 5, np.inf, 40, -40, 40, -40, 45, np.inf, np.inf]
x = stats.truncnorm.rvs(low, high, size=(5, len(low)))
assert np.shape(x) == (5, len(low))
assert_(np.all(low <= x.min(axis=0)))
assert_(np.all(x.max(axis=0) <= high))
def test_rvs_Generator(self):
# check that rvs can use a Generator
if hasattr(np.random, "default_rng"):
stats.truncnorm.rvs(-10, -5, size=5,
random_state=np.random.default_rng())
class TestGenLogistic:
# Expected values computed with mpmath with 50 digits of precision.
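    # For reference, genlogistic.logpdf(x, c) = log(c) - x
    # - (c + 1)*log1p(exp(-x)); e.g. at x=0, c=1.5 this gives
    # log(1.5) - 2.5*log(2) ~= -1.3274.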
@pytest.mark.parametrize('x, expected', [(-1000, -1499.5945348918917),
(-125, -187.09453489189184),
(0, -1.3274028432916989),
(100, -99.59453489189184),
(1000, -999.5945348918918)])
def test_logpdf(self, x, expected):
c = 1.5
logp = stats.genlogistic.logpdf(x, c)
assert_allclose(logp, expected, rtol=1e-13)
class TestHypergeom:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
for eaten in fruits_eaten]
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
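        # hypergeom(4, 1, 1): one draw from 4 items of which 1 is tagged,
        # so P(k=0) = 3/4 and P(k=1) = 1/4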
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
        # Results compared with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
k = 1
M = 1600
n = 600
N = 300
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2.566567e-68 # From R
assert_almost_equal(result, expected, decimal=15)
def test_logcdf(self):
# Test logcdf for very large numbers. See issue #8692
        # Results compared with those from R (v3.3.2):
# phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
# -5273.335
k = 1
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -5273.335 # From R
assert_almost_equal(result, expected, decimal=3)
# Same example as in issue #8692
k = 40
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -7.565148879229e-23 # From R
assert_almost_equal(result, expected, decimal=15)
k = 125
M = 1600
n = 250
N = 500
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -4.242688e-12 # From R
assert_almost_equal(result, expected, decimal=15)
# test broadcasting robustness based on reviewer
# concerns in PR 9603; using an array version of
# the example from issue #8692
k = np.array([40, 40, 40])
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = np.full(3, -7.565148879229e-23) # filled from R result
assert_almost_equal(result, expected, decimal=15)
class TestLoggamma:
# Expected sf values were computed with mpmath. For given x and c,
# x = mpmath.mpf(x)
# c = mpmath.mpf(c)
# sf = mpmath.gammainc(c, mpmath.exp(x), mpmath.inf,
# regularized=True)
@pytest.mark.parametrize('x, c, sf', [(4, 1.5, 1.6341528919488565e-23),
(6, 100, 8.23836829202024e-74)])
def test_sf_isf(self, x, c, sf):
s = stats.loggamma.sf(x, c)
assert_allclose(s, sf, rtol=1e-12)
y = stats.loggamma.isf(s, c)
assert_allclose(y, x, rtol=1e-12)
def test_logpdf(self):
# Test logpdf with x=-500, c=2. ln(gamma(2)) = 0, and
# exp(-500) ~= 7e-218, which is far smaller than the ULP
# of c*x=-1000, so logpdf(-500, 2) = c*x - exp(x) - ln(gamma(2))
# should give -1000.0.
lp = stats.loggamma.logpdf(-500, 2)
assert_allclose(lp, -1000.0, rtol=1e-14)
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
            computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogistic:
# gh-6226
def test_cdf_ppf(self):
x = np.linspace(-20, 20)
y = stats.logistic.cdf(x)
xx = stats.logistic.ppf(y)
assert_allclose(x, xx)
def test_sf_isf(self):
x = np.linspace(-20, 20)
y = stats.logistic.sf(x)
xx = stats.logistic.isf(y)
assert_allclose(x, xx)
def test_extreme_values(self):
# p is chosen so that 1 - (1 - p) == p in double precision
p = 9.992007221626409e-16
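        # logistic.isf(p) = log((1 - p)/p) ~= -log(p) for small p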
desired = 34.53957599234088
assert_allclose(stats.logistic.ppf(1 - p), desired)
assert_allclose(stats.logistic.isf(p), desired)
def test_logpdf_basic(self):
logp = stats.logistic.logpdf([-15, 0, 10])
# Expected values computed with mpmath with 50 digits of precision.
expected = [-15.000000611804547,
-1.3862943611198906,
-10.000090797798434]
assert_allclose(logp, expected, rtol=1e-13)
def test_logpdf_extreme_values(self):
logp = stats.logistic.logpdf([800, -800])
# For such large arguments, logpdf(x) = -abs(x) when computed
# with 64 bit floating point.
assert_equal(logp, [-800, -800])
@pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)])
def test_fit(self, loc_rvs, scale_rvs):
data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
# test that result of fit method is the same as optimization
def func(input, data):
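            # score equations of the logistic MLE: the partial derivatives
            # of the log-likelihood w.r.t. loc (a) and scale (b), set to zero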
a, b = input
n = len(data)
x1 = np.sum(np.exp((data - a) / b) /
(1 + np.exp((data - a) / b))) - n / 2
x2 = np.sum(((data - a) / b) *
((np.exp((data - a) / b) - 1) /
(np.exp((data - a) / b) + 1))) - n
return x1, x2
expected_solution = root(func, stats.logistic._fitstart(data), args=(
data,)).x
fit_method = stats.logistic.fit(data)
# other than computational variances, the fit method and the solution
# to this system of equations are equal
assert_allclose(fit_method, expected_solution, atol=1e-30)
@pytest.mark.parametrize("loc_rvs,scale_rvs", [np.random.rand(2)])
def test_fit_comp_optimizer(self, loc_rvs, scale_rvs):
data = stats.logistic.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
# obtain objective function to compare results of the fit methods
args = [data, (stats.logistic._fitstart(data),)]
func = stats.logistic._reduce_func(args, {})[1]
_assert_less_or_close_loglike(stats.logistic, data, func)
@pytest.mark.parametrize('testlogcdf', [True, False])
def test_logcdfsf_tails(self, testlogcdf):
# Test either logcdf or logsf. By symmetry, we can use the same
# expected values for both by switching the sign of x for logsf.
x = np.array([-10000, -800, 17, 50, 500])
if testlogcdf:
y = stats.logistic.logcdf(x)
else:
y = stats.logistic.logsf(-x)
# The expected values were computed with mpmath.
expected = [-10000.0, -800.0, -4.139937633089748e-08,
-1.9287498479639178e-22, -7.124576406741286e-218]
assert_allclose(y, expected, rtol=2e-15)
class TestLogser:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf_small_p(self):
m = stats.logser.pmf(4, 1e-20)
# The expected value was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 64
# >>> k = 4
# >>> p = mpmath.mpf('1e-20')
# >>> float(-(p**k)/k/mpmath.log(1-p))
# 2.5e-61
# It is also clear from noticing that for very small p,
# log(1-p) is approximately -p, and the formula becomes
# p**(k-1) / k
assert_allclose(m, 2.5e-61)
def test_mean_small_p(self):
m = stats.logser.mean(1e-8)
# The expected mean was computed using mpmath:
# >>> import mpmath
# >>> mpmath.dps = 60
# >>> p = mpmath.mpf('1e-8')
# >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
# 1.000000005
assert_allclose(m, 1.000000005)
class TestGumbel_r_l:
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize("dist", [stats.gumbel_r, stats.gumbel_l])
@pytest.mark.parametrize("loc_rvs,scale_rvs", ([np.random.rand(2)]))
def test_fit_comp_optimizer(self, dist, loc_rvs, scale_rvs):
data = dist.rvs(size=100, loc=loc_rvs, scale=scale_rvs)
# obtain objective function to compare results of the fit methods
args = [data, (dist._fitstart(data),)]
func = dist._reduce_func(args, {})[1]
# test that the gumbel_* fit method is better than super method
_assert_less_or_close_loglike(dist, data, func)
@pytest.mark.parametrize("dist, sgn", [(stats.gumbel_r, 1),
(stats.gumbel_l, -1)])
def test_fit(self, dist, sgn):
z = sgn*np.array([3, 3, 3, 3, 3, 3, 3, 3.00000001])
loc, scale = dist.fit(z)
# The expected values were computed with mpmath with 60 digits
# of precision.
assert_allclose(loc, sgn*3.0000000001667906)
assert_allclose(scale, 1.2495222465145514e-09, rtol=1e-6)
class TestPareto:
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
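        # (for pareto(b): mean = b/(b-1) for b > 1, var = b/((b-1)**2*(b-2))
        # for b > 2; skewness requires b > 3 and kurtosis b > 4, else nan)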
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
def test_sf(self):
x = 1e9
b = 2
scale = 1.5
p = stats.pareto.sf(x, b, loc=0, scale=scale)
expected = (scale/x)**b # 2.25e-18
assert_allclose(p, expected)
@pytest.mark.filterwarnings("ignore:invalid value encountered in "
"double_scalars")
@pytest.mark.parametrize("rvs_shape", [1, 2])
@pytest.mark.parametrize("rvs_loc", [0, 2])
@pytest.mark.parametrize("rvs_scale", [1, 5])
def test_fit(self, rvs_shape, rvs_loc, rvs_scale):
data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
loc=rvs_loc)
# shape can still be fixed with multiple names
shape_mle_analytical1 = stats.pareto.fit(data, floc=0, f0=1.04)[0]
shape_mle_analytical2 = stats.pareto.fit(data, floc=0, fix_b=1.04)[0]
shape_mle_analytical3 = stats.pareto.fit(data, floc=0, fb=1.04)[0]
assert (shape_mle_analytical1 == shape_mle_analytical2 ==
shape_mle_analytical3 == 1.04)
# data can be shifted with changes to `loc`
data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
loc=(rvs_loc + 2))
shape_mle_a, loc_mle_a, scale_mle_a = stats.pareto.fit(data, floc=2)
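        # with `floc` fixed, the analytical MLEs are scale = data.min() - loc
        # and b = n / sum(log((data - loc)/scale))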
assert_equal(scale_mle_a + 2, data.min())
        assert_equal(shape_mle_a,
                     1/((1/len(data)) *
                        np.sum(np.log((data - 2)/(data.min() - 2)))))
assert_equal(loc_mle_a, 2)
@pytest.mark.filterwarnings("ignore:invalid value encountered in "
"double_scalars")
@pytest.mark.parametrize("rvs_shape", [1, 2])
@pytest.mark.parametrize("rvs_loc", [0, 2])
@pytest.mark.parametrize("rvs_scale", [1, 5])
def test_fit_MLE_comp_optimzer(self, rvs_shape, rvs_loc, rvs_scale):
data = stats.pareto.rvs(size=100, b=rvs_shape, scale=rvs_scale,
loc=rvs_loc)
args = [data, (stats.pareto._fitstart(data), )]
func = stats.pareto._reduce_func(args, {})[1]
# fixed `floc` to actual location provides a better fit than the
# super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=rvs_loc)
# fixing `floc` to an arbitrary number, 0, still provides a better
# fit than the super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=0)
# fixed shape still uses MLE formula and provides a better fit than
# the super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=0, f0=4)
# valid fixed fscale still uses MLE formulas and provides a better
# fit than the super method
_assert_less_or_close_loglike(stats.pareto, data, func, floc=0,
fscale=rvs_scale/2)
def test_fit_warnings(self):
assert_fit_warnings(stats.pareto)
# `floc` that causes invalid negative data
assert_raises(FitDataError, stats.pareto.fit, [1, 2, 3], floc=2)
# `floc` and `fscale` combination causes invalid data
assert_raises(FitDataError, stats.pareto.fit, [5, 2, 3], floc=1,
fscale=3)
class TestGenpareto:
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
a, b = stats.genpareto._get_support(c)
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
            for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
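        # genpareto.sf(x, c) = (1 + c*x)**(-1/c), so
        # logsf = -log1p(c*x)/c ~= -100*log(1e8) here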
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
# Values in 'expected_stats' are
# [mean, variance, skewness, excess kurtosis].
@pytest.mark.parametrize(
'c, expected_stats',
[(0, [1, 1, 2, 6]),
(1/4, [4/3, 32/9, 10/np.sqrt(2), np.nan]),
(1/9, [9/8, (81/64)*(9/7), (10/9)*np.sqrt(7), 754/45]),
(-1, [1/2, 1/12, 0, -6/5])])
def test_stats(self, c, expected_stats):
result = stats.genpareto.stats(c, moments='mvsk')
assert_allclose(result, expected_stats, rtol=1e-13, atol=1e-15)
def test_var(self):
# Regression test for gh-11168.
v = stats.genpareto.var(1e-8)
assert_allclose(v, 1.000000040000001, rtol=1e-13)
class TestPearson3:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
def test_negative_cdf_bug_11186(self):
# incorrect CDFs for negative skews in gh-11186; fixed in gh-12640
# Also check vectorization w/ negative, zero, and positive skews
skews = [-3, -1, 0, 0.5]
x_eval = 0.5
neg_inf = -30 # avoid RuntimeWarning caused by np.log(0)
cdfs = stats.pearson3.cdf(x_eval, skews)
int_pdfs = [quad(stats.pearson3(skew).pdf, neg_inf, x_eval)[0]
for skew in skews]
assert_allclose(cdfs, int_pdfs)
def test_return_array_bug_11746(self):
# pearson3.moment was returning size 0 or 1 array instead of float
# The first moment is equal to the loc, which defaults to zero
moment = stats.pearson3.moment(1, 2)
assert_equal(moment, 0)
assert_equal(type(moment), float)
moment = stats.pearson3.moment(1, 0.000001)
assert_equal(moment, 0)
assert_equal(type(moment), float)
class TestKappa4:
def test_cdf_genpareto(self):
# h = 1 and k != 0 is generalized Pareto
x = [0.0, 0.1, 0.2, 0.5]
h = 1.0
for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
1.9]:
vals = stats.kappa4.cdf(x, h, k)
# shape parameter is opposite what is expected
vals_comp = stats.genpareto.cdf(x, -k)
assert_allclose(vals, vals_comp)
def test_cdf_genextreme(self):
# h = 0 and k != 0 is generalized extreme value
x = np.linspace(-5, 5, 10)
h = 0.0
k = np.linspace(-3, 3, 10)
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.genextreme.cdf(x, k)
assert_allclose(vals, vals_comp)
def test_cdf_expon(self):
# h = 1 and k = 0 is exponential
x = np.linspace(0, 10, 10)
h = 1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.expon.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_gumbel_r(self):
# h = 0 and k = 0 is gumbel_r
x = np.linspace(-5, 5, 10)
h = 0.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.gumbel_r.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_logistic(self):
# h = -1 and k = 0 is logistic
x = np.linspace(-5, 5, 10)
h = -1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.logistic.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_uniform(self):
# h = 1 and k = 1 is uniform
x = np.linspace(-5, 5, 10)
h = 1.0
k = 1.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.uniform.cdf(x)
assert_allclose(vals, vals_comp)
def test_integers_ctor(self):
# regression test for gh-7416: _argcheck fails for integer h and k
# in numpy 1.12
stats.kappa4(1, 2)
class TestPoisson:
def setup_method(self):
np.random.seed(1234)
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
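        # for Poisson(mu): mean = var = mu, skewness = 1/sqrt(mu) and
        # excess kurtosis = 1/mu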
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
class TestKSTwo:
def setup_method(self):
np.random.seed(1234)
def test_cdf(self):
for n in [1, 2, 3, 10, 100, 1000]:
# Test x-values:
# 0, 1/2n, where the cdf should be 0
# 1/n, where the cdf should be n!/n^n
# 0.5, where the cdf should match ksone.cdf
# 1-1/n, where cdf = 1-2/n^n
# 1, where cdf == 1
# (E.g. Exact values given by Eqn 1 in Simard / L'Ecuyer)
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([0, 0, v1 * elg,
1 - 2*stats.ksone.sf(0.5, n),
max(1 - 2*v1, 0.0),
1.0])
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected)
def test_sf(self):
for n in [1, 2, 3, 10, 100, 1000]:
# Same x values as in test_cdf, and use sf = 1 - cdf
x = np.array([0, 0.5/n, 1/n, 0.5, 1-1.0/n, 1])
v1 = (1.0/n)**n
lg = scipy.special.gammaln(n+1)
elg = (np.exp(lg) if v1 != 0 else 0)
expected = np.array([1.0, 1.0,
1 - v1 * elg,
2*stats.ksone.sf(0.5, n),
min(2*v1, 1.0), 0])
vals_sf = stats.kstwo.sf(x, n)
assert_allclose(vals_sf, expected)
def test_cdf_sqrtn(self):
# For fixed a, cdf(a/sqrt(n), n) -> kstwobign(a) as n->infinity
# cdf(a/sqrt(n), n) is an increasing function of n (and a)
# Check that the function is indeed increasing (allowing for some
# small floating point and algorithm differences.)
x = np.linspace(0, 2, 11)[1:]
ns = [50, 100, 200, 400, 1000, 2000]
for _x in x:
xn = _x / np.sqrt(ns)
probs = stats.kstwo.cdf(xn, ns)
diffs = np.diff(probs)
assert_array_less(diffs, 1e-8)
def test_cdf_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
vals_cdf = stats.kstwo.cdf(x, n)
vals_sf = stats.kstwo.sf(x, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_cdf_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x / np.sqrt(n)
vals_cdf = stats.kstwo.cdf(xn, n)
vals_sf = stats.kstwo.sf(xn, n)
assert_array_almost_equal(vals_cdf, 1 - vals_sf)
def test_ppf_of_cdf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
# CDFs close to 1 are better dealt with using the SF
cond = (0 < vals_cdf) & (vals_cdf < 0.99)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_isf_of_sf(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = x[x > 0.5/n]
vals_isf = stats.kstwo.isf(xn, n)
cond = (0 < vals_isf) & (vals_isf < 1.0)
vals = stats.kstwo.sf(vals_isf, n)
assert_allclose(vals[cond], xn[cond], rtol=1e-4)
def test_ppf_of_cdf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_cdf = stats.kstwo.cdf(xn, n)
cond = (0 < vals_cdf) & (vals_cdf < 1.0)
vals = stats.kstwo.ppf(vals_cdf, n)
assert_allclose(vals[cond], xn[cond])
def test_isf_of_sf_sqrtn(self):
x = np.linspace(0, 1, 11)
for n in [1, 2, 3, 10, 100, 1000]:
xn = (x / np.sqrt(n))[x > 0.5/n]
vals_sf = stats.kstwo.sf(xn, n)
# SFs close to 1 are better dealt with using the CDF
cond = (0 < vals_sf) & (vals_sf < 0.95)
vals = stats.kstwo.isf(vals_sf, n)
assert_allclose(vals[cond], xn[cond])
def test_ppf(self):
probs = np.linspace(0, 1, 11)[1:]
for n in [1, 2, 3, 10, 100, 1000]:
xn = stats.kstwo.ppf(probs, n)
vals_cdf = stats.kstwo.cdf(xn, n)
assert_allclose(vals_cdf, probs)
def test_simard_lecuyer_table1(self):
# Compute the cdf for values near the mean of the distribution.
# The mean u ~ log(2)*sqrt(pi/(2n))
# Compute for x in [u/4, u/3, u/2, u, 2u, 3u]
# This is the computation of Table 1 of Simard, R., L'Ecuyer, P. (2011)
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution".
# Except that the values below are not from the published table, but
# were generated using an independent SageMath implementation of
# Durbin's algorithm (with the exponentiation and scaling of
# Marsaglia/Tsang/Wang's version) using 500 bit arithmetic.
# Some of the values in the published table have relative
# errors greater than 1e-4.
ns = [10, 50, 100, 200, 500, 1000]
ratios = np.array([1.0/4, 1.0/3, 1.0/2, 1, 2, 3])
expected = np.array([
[1.92155292e-08, 5.72933228e-05, 2.15233226e-02, 6.31566589e-01,
9.97685592e-01, 9.99999942e-01],
[2.28096224e-09, 1.99142563e-05, 1.42617934e-02, 5.95345542e-01,
9.96177701e-01, 9.99998662e-01],
[1.00201886e-09, 1.32673079e-05, 1.24608594e-02, 5.86163220e-01,
9.95866877e-01, 9.99998240e-01],
[4.93313022e-10, 9.52658029e-06, 1.12123138e-02, 5.79486872e-01,
9.95661824e-01, 9.99997964e-01],
[2.37049293e-10, 6.85002458e-06, 1.01309221e-02, 5.73427224e-01,
9.95491207e-01, 9.99997750e-01],
[1.56990874e-10, 5.71738276e-06, 9.59725430e-03, 5.70322692e-01,
9.95409545e-01, 9.99997657e-01]
])
for idx, n in enumerate(ns):
x = ratios * np.log(2) * np.sqrt(np.pi/2/n)
vals_cdf = stats.kstwo.cdf(x, n)
assert_allclose(vals_cdf, expected[idx], rtol=1e-5)
class TestZipf:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
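        # a=2.8: mean is finite (needs a > 2) but variance is not (needs
        # a > 3); a=4.8: skewness is finite but kurtosis (needs a > 5) is not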
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
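        # for dlaplace(a), the variance is 2*exp(-a)/(1 - exp(-a))**2,
        # which equals 4 at a = log(2)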
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
class TestInvgauss:
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
[(2, 0, 1), (np.random.rand(3)*10)])
def test_fit(self, rvs_mu, rvs_loc, rvs_scale):
data = stats.invgauss.rvs(size=100, mu=rvs_mu,
loc=rvs_loc, scale=rvs_scale)
# Analytical MLEs are calculated with formula when `floc` is fixed
mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc)
data = data - rvs_loc
mu_temp = np.mean(data)
scale_mle = len(data) / (np.sum(data**(-1) - mu_temp**(-1)))
mu_mle = mu_temp/scale_mle
# `mu` and `scale` match analytical formula
assert_allclose(mu_mle, mu, atol=1e-15, rtol=1e-15)
assert_allclose(scale_mle, scale, atol=1e-15, rtol=1e-15)
assert_equal(loc, rvs_loc)
data = stats.invgauss.rvs(size=100, mu=rvs_mu,
loc=rvs_loc, scale=rvs_scale)
# fixed parameters are returned
mu, loc, scale = stats.invgauss.fit(data, floc=rvs_loc - 1,
fscale=rvs_scale + 1)
assert_equal(rvs_scale + 1, scale)
assert_equal(rvs_loc - 1, loc)
# shape can still be fixed with multiple names
shape_mle1 = stats.invgauss.fit(data, fmu=1.04)[0]
shape_mle2 = stats.invgauss.fit(data, fix_mu=1.04)[0]
shape_mle3 = stats.invgauss.fit(data, f0=1.04)[0]
assert shape_mle1 == shape_mle2 == shape_mle3 == 1.04
@pytest.mark.parametrize("rvs_mu,rvs_loc,rvs_scale",
[(2, 0, 1), (np.random.rand(3)*10)])
def test_fit_MLE_comp_optimzer(self, rvs_mu, rvs_loc, rvs_scale):
data = stats.invgauss.rvs(size=100, mu=rvs_mu,
loc=rvs_loc, scale=rvs_scale)
super_fit = super(type(stats.invgauss), stats.invgauss).fit
# fitting without `floc` uses superclass fit method
super_fitted = super_fit(data)
invgauss_fit = stats.invgauss.fit(data)
assert_equal(super_fitted, invgauss_fit)
        # fitting with `fmu` fixed uses the superclass fit method
super_fitted = super_fit(data, floc=0, fmu=2)
invgauss_fit = stats.invgauss.fit(data, floc=0, fmu=2)
assert_equal(super_fitted, invgauss_fit)
# obtain log-likelihood objective function to compare results
args = [data, (stats.invgauss._fitstart(data), )]
func = stats.invgauss._reduce_func(args, {})[1]
# fixed `floc` uses analytical formula and provides better fit than
# super method
_assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc)
# fixed `floc` not resulting in invalid data < 0 uses analytical
# formulas and provides a better fit than the super method
assert np.all((data - (rvs_loc - 1)) > 0)
_assert_less_or_close_loglike(stats.invgauss, data, func,
floc=rvs_loc - 1)
# fixed `floc` to an arbitrary number, 0, still provides a better fit
# than the super method
_assert_less_or_close_loglike(stats.invgauss, data, func, floc=0)
# fixed `fscale` to an arbitrary number still provides a better fit
# than the super method
_assert_less_or_close_loglike(stats.invgauss, data, func, floc=rvs_loc,
fscale=np.random.rand(1)[0])
def test_fit_raise_errors(self):
assert_fit_warnings(stats.invgauss)
        # FitDataError is raised when `floc` leaves invalid (negative) data
with pytest.raises(FitDataError):
stats.invgauss.fit([1, 2, 3], floc=2)
def test_cdf_sf(self):
# Regression tests for gh-13614.
# Ground truth from R's statmod library (pinvgauss), e.g.
# library(statmod)
# options(digits=15)
# mu = c(4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
# 3.02332573e-03, 1.46755891e-03)
# print(pinvgauss(5, mu, 1))
# make sure a finite value is returned when mu is very small. see
# GH-13614
mu = [4.17022005e-04, 7.20324493e-03, 1.14374817e-06,
3.02332573e-03, 1.46755891e-03]
expected = [1, 1, 1, 1, 1]
actual = stats.invgauss.cdf(0.4, mu=mu)
assert_equal(expected, actual)
# test if the function can distinguish small left/right tail
# probabilities from zero.
cdf_actual = stats.invgauss.cdf(0.001, mu=1.05)
assert_allclose(cdf_actual, 4.65246506892667e-219)
sf_actual = stats.invgauss.sf(110, mu=1.05)
assert_allclose(sf_actual, 4.12851625944048e-25)
# test if x does not cause numerical issues when mu is very small
# and x is close to mu in value.
# slightly smaller than mu
actual = stats.invgauss.cdf(0.00009, 0.0001)
assert_allclose(actual, 2.9458022894924e-26)
# slightly bigger than mu
actual = stats.invgauss.cdf(0.000102, 0.0001)
assert_allclose(actual, 0.976445540507925)
def test_logcdf_logsf(self):
# Regression tests for improvements made in gh-13616.
# Ground truth from R's statmod library (pinvgauss), e.g.
# library(statmod)
# options(digits=15)
# print(pinvgauss(0.001, 1.05, 1, log.p=TRUE, lower.tail=FALSE))
# test if logcdf and logsf can compute values too small to
# be represented on the unlogged scale. See: gh-13616
logcdf = stats.invgauss.logcdf(0.0001, mu=1.05)
assert_allclose(logcdf, -5003.87872590367)
logcdf = stats.invgauss.logcdf(110, 1.05)
assert_allclose(logcdf, -4.12851625944087e-25)
logsf = stats.invgauss.logsf(0.001, mu=1.05)
assert_allclose(logsf, -4.65246506892676e-219)
logsf = stats.invgauss.logsf(110, 1.05)
assert_allclose(logsf, -56.1467092416426)
class TestLaplace:
@pytest.mark.parametrize("rvs_loc", [-5, 0, 1, 2])
@pytest.mark.parametrize("rvs_scale", [1, 2, 3, 10])
def test_fit(self, rvs_loc, rvs_scale):
# tests that various inputs follow expected behavior
# for a variety of `loc` and `scale`.
data = stats.laplace.rvs(size=100, loc=rvs_loc, scale=rvs_scale)
        # the analytical MLEs: loc is the sample median and scale is the
        # mean absolute deviation about it
loc_mle = np.median(data)
scale_mle = np.sum(np.abs(data - loc_mle)) / len(data)
# standard outputs should match analytical MLE formulas
loc, scale = stats.laplace.fit(data)
assert_allclose(loc, loc_mle, atol=1e-15, rtol=1e-15)
assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
# fixed parameter should use analytical formula for other
loc, scale = stats.laplace.fit(data, floc=loc_mle)
assert_allclose(scale, scale_mle, atol=1e-15, rtol=1e-15)
loc, scale = stats.laplace.fit(data, fscale=scale_mle)
assert_allclose(loc, loc_mle)
# test with non-mle fixed parameter
# create scale with non-median loc
loc = rvs_loc * 2
scale_mle = np.sum(np.abs(data - loc)) / len(data)
# fixed loc to non median, scale should match
# scale calculation with modified loc
loc, scale = stats.laplace.fit(data, floc=loc)
assert_equal(scale_mle, scale)
# fixed scale created with non median loc,
# loc output should still be the data median.
loc, scale = stats.laplace.fit(data, fscale=scale_mle)
assert_equal(loc_mle, loc)
# error raised when both `floc` and `fscale` are fixed
assert_raises(RuntimeError, stats.laplace.fit, data, floc=loc_mle,
fscale=scale_mle)
# error is raised with non-finite values
assert_raises(RuntimeError, stats.laplace.fit, [np.nan])
assert_raises(RuntimeError, stats.laplace.fit, [np.inf])
@pytest.mark.parametrize("rvs_scale,rvs_loc", [(10, -5),
(5, 10),
(.2, .5)])
def test_fit_MLE_comp_optimzer(self, rvs_loc, rvs_scale):
data = stats.laplace.rvs(size=1000, loc=rvs_loc, scale=rvs_scale)
# the log-likelihood function for laplace is given by
def ll(loc, scale, data):
return -1 * (- (len(data)) * np.log(2*scale) -
(1/scale)*np.sum(np.abs(data - loc)))
# test that the objective function result of the analytical MLEs is
# less than or equal to that of the numerically optimized estimate
loc, scale = stats.laplace.fit(data)
loc_opt, scale_opt = super(type(stats.laplace),
stats.laplace).fit(data)
ll_mle = ll(loc, scale, data)
ll_opt = ll(loc_opt, scale_opt, data)
assert ll_mle < ll_opt or np.allclose(ll_mle, ll_opt,
atol=1e-15, rtol=1e-15)
def test_fit_simple_non_random_data(self):
data = np.array([1.0, 1.0, 3.0, 5.0, 8.0, 14.0])
# with `floc` fixed to 6, scale should be 4.
loc, scale = stats.laplace.fit(data, floc=6)
assert_allclose(scale, 4, atol=1e-15, rtol=1e-15)
# with `fscale` fixed to 6, loc should be 4.
loc, scale = stats.laplace.fit(data, fscale=6)
assert_allclose(loc, 4, atol=1e-15, rtol=1e-15)
def test_sf_cdf_extremes(self):
# These calculations should not generate warnings.
x = 1000
p0 = stats.laplace.cdf(-x)
# The exact value is smaller than can be represented with
        # 64 bit floating point, so the expected result is 0.
assert p0 == 0.0
# The closest 64 bit floating point representation of the
# exact value is 1.0.
p1 = stats.laplace.cdf(x)
assert p1 == 1.0
p0 = stats.laplace.sf(x)
# The exact value is smaller than can be represented with
        # 64 bit floating point, so the expected result is 0.
assert p0 == 0.0
# The closest 64 bit floating point representation of the
# exact value is 1.0.
p1 = stats.laplace.sf(-x)
assert p1 == 1.0
def test_sf(self):
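        # for x > 0, laplace.sf(x) = exp(-x)/2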
x = 200
p = stats.laplace.sf(x)
assert_allclose(p, np.exp(-x)/2, rtol=1e-13)
def test_isf(self):
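        # laplace.isf(p) = -log(2*p) for p < 1/2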
p = 1e-25
x = stats.laplace.isf(p)
assert_allclose(x, -np.log(2*p), rtol=1e-13)
class TestInvGamma:
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
assert_allclose(mvsk, expected)
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
def test_cdf_ppf(self):
# gh-6245
x = np.logspace(-2.6, 0)
y = stats.invgamma.cdf(x, 1)
xx = stats.invgamma.ppf(y, 1)
assert_allclose(x, xx)
def test_sf_isf(self):
# gh-6245
if sys.maxsize > 2**32:
x = np.logspace(2, 100)
else:
            # invgamma roundtrip on 32-bit systems has relative accuracy
# ~1e-15 until x=1e+15, and becomes inf above x=1e+18
x = np.logspace(2, 18)
y = stats.invgamma.sf(x, 1)
xx = stats.invgamma.isf(y, 1)
assert_allclose(x, xx, rtol=1.0)
class TestF:
def test_endpoints(self):
# Compute the pdf at the left endpoint dst.a.
        data = [[stats.f, (2, 1), 1.0]]
        ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
        correct = [_correct_ for _f, _args, _correct_ in data]
        assert_array_almost_equal(ans, correct)
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
def test_stats_broadcast(self):
dfn = np.array([[3], [11]])
dfd = np.array([11, 12])
m, v, s, k = stats.f.stats(dfn=dfn, dfd=dfd, moments='mvsk')
m2 = [dfd / (dfd - 2)]*2
assert_allclose(m, m2)
v2 = 2 * dfd**2 * (dfn + dfd - 2) / dfn / (dfd - 2)**2 / (dfd - 4)
assert_allclose(v, v2)
s2 = ((2*dfn + dfd - 2) * np.sqrt(8*(dfd - 4)) /
((dfd - 6) * np.sqrt(dfn*(dfn + dfd - 2))))
assert_allclose(s, s2)
k2num = 12 * (dfn * (5*dfd - 22) * (dfn + dfd - 2) +
(dfd - 4) * (dfd - 2)**2)
k2den = dfn * (dfd - 6) * (dfd - 8) * (dfn + dfd - 2)
k2 = k2num / k2den
assert_allclose(k, k2)
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
def test_moments_t():
# regression test for #8786
assert_equal(stats.t.stats(df=1, moments='mvsk'),
(np.inf, np.nan, np.nan, np.nan))
assert_equal(stats.t.stats(df=1.01, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2, moments='mvsk'),
(0.0, np.inf, np.nan, np.nan))
assert_equal(stats.t.stats(df=2.01, moments='mvsk'),
(0.0, 2.01/(2.01-2.0), np.nan, np.inf))
assert_equal(stats.t.stats(df=3, moments='sk'), (np.nan, np.inf))
assert_equal(stats.t.stats(df=3.01, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4, moments='sk'), (0.0, np.inf))
assert_equal(stats.t.stats(df=4.01, moments='sk'), (0.0, 6.0/(4.01 - 4.0)))
def test_t_entropy():
df = [1, 2, 25, 100]
# Expected values were computed with mpmath.
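# A sketch of how such values can be reproduced with mpmath (an
# illustration, not necessarily the script originally used), based on
# the closed form of the Student t entropy:
#     import mpmath
#     mpmath.mp.dps = 50
#     def t_entropy(df):
#         df = mpmath.mpf(df)
#         h = ((df + 1)/2 * (mpmath.digamma((df + 1)/2)
#                            - mpmath.digamma(df/2))
#              + mpmath.log(mpmath.sqrt(df)
#                           * mpmath.beta(df/2, mpmath.mpf(1)/2)))
#         return float(h)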
expected = [2.5310242469692907, 1.9602792291600821,
1.459327578078393, 1.4289633653182439]
assert_allclose(stats.t.entropy(df), expected, rtol=1e-13)
@pytest.mark.parametrize("methname", ["pdf", "logpdf", "cdf",
"ppf", "sf", "isf"])
@pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1],
[[0, 1, 0], [1, 1, 1]],
[[1, 0], [0, 1]],
[[0], [1]]])
def test_t_inf_df(methname, df_infmask):
np.random.seed(0)
df_infmask = np.asarray(df_infmask, dtype=bool)
df = np.random.uniform(0, 10, size=df_infmask.shape)
x = np.random.randn(*df_infmask.shape)
df[df_infmask] = np.inf
t_dist = stats.t(df=df, loc=3, scale=1)
t_dist_ref = stats.t(df=df[~df_infmask], loc=3, scale=1)
norm_dist = stats.norm(loc=3, scale=1)
t_meth = getattr(t_dist, methname)
t_meth_ref = getattr(t_dist_ref, methname)
norm_meth = getattr(norm_dist, methname)
res = t_meth(x)
assert_equal(res[df_infmask], norm_meth(x[df_infmask]))
assert_equal(res[~df_infmask], t_meth_ref(x[~df_infmask]))
@pytest.mark.parametrize("df_infmask", [[0, 0], [1, 1], [0, 1],
[[0, 1, 0], [1, 1, 1]],
[[1, 0], [0, 1]],
[[0], [1]]])
def test_t_inf_df_stats_entropy(df_infmask):
np.random.seed(0)
df_infmask = np.asarray(df_infmask, dtype=bool)
df = np.random.uniform(0, 10, size=df_infmask.shape)
df[df_infmask] = np.inf
res = stats.t.stats(df=df, loc=3, scale=1, moments='mvsk')
res_ex_inf = stats.norm.stats(loc=3, scale=1, moments='mvsk')
res_ex_noinf = stats.t.stats(df=df[~df_infmask], loc=3, scale=1,
moments='mvsk')
for i in range(4):
assert_equal(res[i][df_infmask], res_ex_inf[i])
assert_equal(res[i][~df_infmask], res_ex_noinf[i])
res = stats.t.entropy(df=df, loc=3, scale=1)
res_ex_inf = stats.norm.entropy(loc=3, scale=1)
res_ex_noinf = stats.t.entropy(df=df[~df_infmask], loc=3, scale=1)
assert_equal(res[df_infmask], res_ex_inf)
assert_equal(res[~df_infmask], res_ex_noinf)
class TestRvDiscrete:
def setup_method(self):
np.random.seed(1234)
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s, p in zip(states, probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_multidimension(self):
xk = np.arange(12).reshape((3, 4))
pk = np.array([[0.1, 0.1, 0.15, 0.05],
[0.1, 0.1, 0.05, 0.05],
[0.1, 0.1, 0.05, 0.05]])
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3]
pk = [0.5, 1.2, -0.7]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3, 4, 5]
pk = [0.3, 0.3, 0.3, 0.3, -0.2]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
def test_shape_rv_sample(self):
# tests added for gh-9565
# mismatch of 2d inputs
xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same number of elements, but shapes not compatible
xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same shapes => no error
xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
def test_expect1(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_expect2(self):
# rv_sample should override _expect. Bug report from
# https://stackoverflow.com/questions/63199792
y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,
1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,
2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,
3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,
4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]
py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,
0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,
0.00019999999999997797, 0.0006000000000000449,
0.024499999999999966, 0.006400000000000072,
0.0043999999999999595, 0.019499999999999962,
0.03770000000000007, 0.01759999999999995, 0.015199999999999991,
0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,
0.0, 0.0041000000000001036, 0.005999999999999894,
0.0042000000000000925, 0.0050000000000000044,
0.0041999999999999815, 0.0004999999999999449,
0.009199999999999986, 0.008200000000000096,
0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,
0.0006000000000000449, 0.02510000000000001, 0.0,
0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,
0.008199999999999985, 0.005600000000000049, 0.0]
rv = stats.rv_discrete(values=(y, py))
# check the mean
assert_allclose(rv.expect(), rv.mean(), atol=1e-14)
assert_allclose(rv.expect(),
sum(v * w for v, w in zip(y, py)), atol=1e-14)
# also check the second moment
assert_allclose(rv.expect(lambda x: x**2),
sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)
class TestSkewCauchy:
def test_cauchy(self):
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewcauchy.pdf(x, a=0),
stats.cauchy.pdf(x))
assert_array_almost_equal(stats.skewcauchy.cdf(x, a=0),
stats.cauchy.cdf(x))
assert_array_almost_equal(stats.skewcauchy.ppf(x, a=0),
stats.cauchy.ppf(x))
def test_skewcauchy_R(self):
# options(digits=16)
# library(sgt)
# # lmbda, x contain the values generated for a, x below
# lmbda <- c(0.0976270078546495, 0.430378732744839, 0.2055267521432877,
# 0.0897663659937937, -0.15269040132219, 0.2917882261333122,
# -0.12482557747462, 0.7835460015641595, 0.9273255210020589,
# -0.2331169623484446)
# x <- c(2.917250380826646, 0.2889491975290444, 0.6804456109393229,
# 4.25596638292661, -4.289639418021131, -4.1287070029845925,
# -4.797816025596743, 3.32619845547938, 2.7815675094985046,
# 3.700121482468191)
# pdf = dsgt(x, mu=0, lambda=lmbda, sigma=1, q=1/2, mean.cent=FALSE,
# var.adj = sqrt(2))
# cdf = psgt(x, mu=0, lambda=lmbda, sigma=1, q=1/2, mean.cent=FALSE,
# var.adj = sqrt(2))
# qsgt(cdf, mu=0, lambda=lmbda, sigma=1, q=1/2, mean.cent=FALSE,
# var.adj = sqrt(2))
np.random.seed(0)
a = np.random.rand(10) * 2 - 1
x = np.random.rand(10) * 10 - 5
pdf = [0.039473975217333909, 0.305829714049903223, 0.24140158118994162,
0.019585772402693054, 0.021436553695989482, 0.00909817103867518,
0.01658423410016873, 0.071083288030394126, 0.103250045941454524,
0.013110230778426242]
cdf = [0.87426677718213752, 0.37556468910780882, 0.59442096496538066,
0.91304659850890202, 0.09631964100300605, 0.03829624330921733,
0.08245240578402535, 0.72057062945510386, 0.62826415852515449,
0.95011308463898292]
assert_allclose(stats.skewcauchy.pdf(x, a), pdf)
assert_allclose(stats.skewcauchy.cdf(x, a), cdf)
assert_allclose(stats.skewcauchy.ppf(cdf, a), x)
class TestSkewNorm:
def setup_method(self):
self.rng = check_random_state(1234)
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape, random_state=self.rng)
assert_equal(shape, x.shape)
def test_moments(self):
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2,
random_state=self.rng)
expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)]
computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk')
assert_array_almost_equal(computed, expected, decimal=2)
def test_cdf_large_x(self):
# Regression test for gh-7746.
# The x values are large enough that the closest 64 bit floating
# point representation of the exact CDF is 1.0.
p = stats.skewnorm.cdf([10, 20, 30], -1)
assert_allclose(p, np.ones(3), rtol=1e-14)
p = stats.skewnorm.cdf(25, 2.5)
assert_allclose(p, 1.0, rtol=1e-14)
def test_cdf_sf_small_values(self):
# Triples are [x, a, cdf(x, a)]. These values were computed
# using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha.
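# They can also be reproduced by direct integration of the skew normal
# density 2*phi(t)*Phi(a*t); a sketch with mpmath (an illustration,
# not the script that generated the values above):
#     import mpmath
#     mpmath.mp.dps = 50
#     def sn_cdf(x, a):
#         pdf = lambda t: 2*mpmath.npdf(t)*mpmath.ncdf(a*t)
#         return mpmath.quad(pdf, [-mpmath.inf, x])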
cdfvals = [
[-8, 1, 3.870035046664392611e-31],
[-4, 2, 8.1298399188811398e-21],
[-2, 5, 1.55326826787106273e-26],
[-9, -1, 2.257176811907681295e-19],
[-10, -4, 1.523970604832105213e-23],
]
for x, a, cdfval in cdfvals:
p = stats.skewnorm.cdf(x, a)
assert_allclose(p, cdfval, rtol=1e-8)
# For the skew normal distribution, sf(-x, -a) = cdf(x, a).
p = stats.skewnorm.sf(-x, -a)
assert_allclose(p, cdfval, rtol=1e-8)
class TestExpon:
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
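# For small x, expon.cdf(x) = 1 - exp(-x) = x - x**2/2 + ..., so at
# x = 1e-18 the result equals x to double precision.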
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.expon.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.expon.fit, x)
class TestNorm:
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.norm.fit, x)
def test_bad_keyword_arg(self):
x = [1, 2, 3]
assert_raises(TypeError, stats.norm.fit, x, plate="shrimp")
class TestUniform:
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.uniform.fit, x)
class TestExponNorm:
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See Wikipedia for these formulae, where this distribution is
# listed as an exponentially modified Gaussian.
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(RuntimeError, stats.exponnorm.fit, x, floc=0, fscale=1)
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(-900, 0.01), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 0.01), 0.0)
# Expected values for the PDF were computed with mpmath, with
# the following function, and with mpmath.mp.dps = 50.
#
# def exponnorm_stdpdf(x, K):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# t1 = mpmath.exp(1/(2*K**2) - x/K)
# erfcarg = -(x - 1/K)/mpmath.sqrt(2)
# t2 = mpmath.erfc(erfcarg)
# return t1 * t2 / (2*K)
#
@pytest.mark.parametrize('x, K, expected',
[(20, 0.01, 6.90010764753618e-88),
(1, 0.01, 0.24438994313247364),
(-1, 0.01, 0.23955149623472075),
(-20, 0.01, 4.6004708690125477e-88),
(10, 1, 7.48518298877006e-05),
(10, 10000, 9.990005048283775e-05)])
def test_std_pdf(self, x, K, expected):
assert_allclose(stats.exponnorm.pdf(x, K), expected, rtol=1e-12)
# Expected values for the CDF were computed with mpmath using
# the following function and with mpmath.mp.dps = 60:
#
# def mp_exponnorm_cdf(x, K, loc=0, scale=1):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# loc = mpmath.mpf(loc)
# scale = mpmath.mpf(scale)
# z = (x - loc)/scale
# return (mpmath.ncdf(z)
# - mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
#
@pytest.mark.parametrize('x, K, scale, expected',
[[0, 0.01, 1, 0.4960109760186432],
[-5, 0.005, 1, 2.7939945412195734e-07],
[-1e4, 0.01, 100, 0.0],
[-1e4, 0.01, 1000, 6.920401854427357e-24],
[5, 0.001, 1, 0.9999997118542392]])
def test_cdf_small_K(self, x, K, scale, expected):
p = stats.exponnorm.cdf(x, K, scale=scale)
if expected == 0.0:
assert p == 0.0
else:
assert_allclose(p, expected, rtol=1e-13)
# Expected values for the SF were computed with mpmath using
# the following function and with mpmath.mp.dps = 60:
#
# def mp_exponnorm_sf(x, K, loc=0, scale=1):
# x = mpmath.mpf(x)
# K = mpmath.mpf(K)
# loc = mpmath.mpf(loc)
# scale = mpmath.mpf(scale)
# z = (x - loc)/scale
# return (mpmath.ncdf(-z)
# + mpmath.exp((1/(2*K) - z)/K)*mpmath.ncdf(z - 1/K))
#
@pytest.mark.parametrize('x, K, scale, expected',
[[10, 0.01, 1, 8.474702916146657e-24],
[2, 0.005, 1, 0.02302280664231312],
[5, 0.005, 0.5, 8.024820681931086e-24],
[10, 0.005, 0.5, 3.0603340062892486e-89],
[20, 0.005, 0.5, 0.0],
[-3, 0.001, 1, 0.9986545205566117]])
def test_sf_small_K(self, x, K, scale, expected):
p = stats.exponnorm.sf(x, K, scale=scale)
if expected == 0.0:
assert p == 0.0
else:
assert_allclose(p, expected, rtol=5e-13)
class TestGenExpon:
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_almost_equal(simps(p, dx=0.01), 1, 1)
def test_cdf_bounds(self):
# CDF values should always lie within [0, 1]
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
def test_sf_tail(self):
# Expected value computed with mpmath. This script
# import mpmath
# mpmath.mp.dps = 80
# x = mpmath.mpf('15.0')
# a = mpmath.mpf('1.0')
# b = mpmath.mpf('2.0')
# c = mpmath.mpf('1.5')
# print(float(mpmath.exp((-a-b)*x + (b/c)*-mpmath.expm1(-c*x))))
# prints
# 1.0859444834514553e-19
s = stats.genexpon.sf(15, 1, 2, 1.5)
assert_allclose(s, 1.0859444834514553e-19, rtol=1e-13)
class TestExponpow:
def test_tail(self):
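# Note (for context): exponpow.cdf(x, b) = 1 - exp(1 - exp(x**b)),
# which for small x is approximately x**b, hence cdf(1e-10, 2) is
# expected to be about 1e-20.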
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
class TestSkellam:
def test_pmf(self):
# comparison to R
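# The reference values below were generated with R. An independent
# way to reproduce them, noted here only as a sketch, is the closed
# form pmf(k) = exp(-(mu1+mu2)) * (mu1/mu2)**(k/2)
#               * I_|k|(2*sqrt(mu1*mu2)),
# e.g. with mpmath:
#     import mpmath
#     mpmath.mp.dps = 30
#     def skellam_pmf(k, mu1, mu2):
#         return float(mpmath.exp(-(mu1 + mu2))
#                      * (mpmath.mpf(mu1)/mu2)**(k/2)
#                      * mpmath.besseli(abs(k), 2*mpmath.sqrt(mu1*mu2)))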
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm:
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
# Also make sure there are no warnings at x=0, cf gh-5202
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
def test_logcdf(self):
# Regression test for gh-5940: sf et al would underflow too early
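# If X is lognormal with shape s (and loc 0), then log(X)/s is
# standard normal, so lognorm.sf(y, s) should equal norm.sf(log(y)/s);
# that identity is what is checked below.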
x2, mu, sigma = 201.68, 195, 0.149
assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
stats.norm.sf(np.log(x2-mu)/sigma))
assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta:
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0, 0.5, 1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.beta.fit, x, floc=0, fscale=1,
plate="shrimp")
def test_fit_duplicated_fixed_parameter(self):
# At most one of 'f0', 'fa' or 'fix_a' can be given to the fit method.
# More than one raises a ValueError.
x = [0.1, 0.5, 0.6]
assert_raises(ValueError, stats.beta.fit, x, fa=0.5, fix_a=0.5)
@pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
def test_issue_12635(self):
# Confirm that Boost's beta distribution resolves gh-12635.
# Check against R:
# options(digits=16)
# p = 0.9999999999997369
# a = 75.0
# b = 66334470.0
# print(qbeta(p, a, b))
p, a, b = 0.9999999999997369, 75.0, 66334470.0
assert_allclose(stats.beta.ppf(p, a, b), 2.343620802982393e-06)
@pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
def test_issue_12794(self):
# Confirm that Boost's beta distribution resolves gh-12794.
# Check against R.
# options(digits=16)
# p = 1e-11
# count_list = c(10,100,1000)
# print(qbeta(1-p, count_list + 1, 100000 - count_list))
inv_R = np.array([0.0004944464889611935,
0.0018360586912635726,
0.0122663919942518351])
count_list = np.array([10, 100, 1000])
p = 1e-11
inv = stats.beta.isf(p, count_list + 1, 100000 - count_list)
assert_allclose(inv, inv_R)
res = stats.beta.sf(inv, count_list + 1, 100000 - count_list)
assert_allclose(res, p)
@pytest.mark.skipif(MACOS_INTEL, reason="Overflow, see gh-14901")
def test_issue_12796(self):
# Confirm that Boost's beta distribution succeeds in the case
# of gh-12796
alpha_2 = 5e-6
count_ = np.arange(1, 20)
nobs = 100000
q, a, b = 1 - alpha_2, count_ + 1, nobs - count_
inv = stats.beta.ppf(q, a, b)
res = stats.beta.cdf(inv, a, b)
assert_allclose(res, 1 - alpha_2)
def test_endpoints(self):
# Confirm that boost's beta distribution returns inf at x=1
# when b<1
a, b = 1, 0.5
assert_equal(stats.beta.pdf(1, a, b), np.inf)
# Confirm that boost's beta distribution returns inf at x=0
# when a<1
a, b = 0.2, 3
assert_equal(stats.beta.pdf(0, a, b), np.inf)
def test_boost_eval_issue_14606(self):
q, a, b = 0.995, 1.0e11, 1.0e13
with pytest.warns(RuntimeWarning):
stats.beta.ppf(q, a, b)
class TestBetaPrime:
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma:
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
# Regression test for Ticket #1326: corner case; avoid nan from the
# 0*log(0) situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
def test_fit_bad_keyword_args(self):
x = [0.1, 0.5, 0.6]
assert_raises(TypeError, stats.gamma.fit, x, floc=0, plate="shrimp")
def test_isf(self):
# Test cases for when the probability is very small. See gh-13664.
# The expected values can be checked with mpmath. With mpmath,
# the survival function sf(x, k) can be computed as
#
# mpmath.gammainc(k, x, mpmath.inf, regularized=True)
#
# Here we have:
#
# >>> mpmath.mp.dps = 60
# >>> float(mpmath.gammainc(1, 39.14394658089878, mpmath.inf,
# ... regularized=True))
# 9.99999999999999e-18
# >>> float(mpmath.gammainc(100, 330.6557590436547, mpmath.inf,
# regularized=True))
# 1.000000000000028e-50
#
assert np.isclose(stats.gamma.isf(1e-17, 1),
39.14394658089878, atol=1e-14)
assert np.isclose(stats.gamma.isf(1e-50, 100),
330.6557590436547, atol=1e-13)
class TestChi2:
# regression tests after precision improvements, ticket:1041, not verified
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
decimal=14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
decimal=14)
def test_ppf(self):
# Expected values computed with mpmath.
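# A sketch of how such values can be reproduced (an illustration, not
# necessarily the original script): the chi2 CDF is the regularized
# lower incomplete gamma function P(df/2, x/2), so the PPF can be
# obtained by root finding, e.g.
#     import mpmath
#     mpmath.mp.dps = 50
#     def chi2_ppf(p, df, x0=1.0):
#         f = lambda x: mpmath.gammainc(df/2, 0, x/2,
#                                       regularized=True) - p
#         return mpmath.findroot(f, x0)  # x0: suitable starting guess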
df = 4.8
x = stats.chi2.ppf(2e-47, df)
assert_allclose(x, 1.098472479575179840604902808e-19, rtol=1e-10)
x = stats.chi2.ppf(0.5, df)
assert_allclose(x, 4.15231407598589358660093156, rtol=1e-10)
df = 13
x = stats.chi2.ppf(2e-77, df)
assert_allclose(x, 1.0106330688195199050507943e-11, rtol=1e-10)
x = stats.chi2.ppf(0.1, df)
assert_allclose(x, 7.041504580095461859307179763, rtol=1e-10)
class TestGumbelL:
# gh-6228
def test_cdf_ppf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.cdf(x)
xx = stats.gumbel_l.ppf(y)
assert_allclose(x, xx)
def test_logcdf_logsf(self):
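# cdf(x) and 1 - sf(x) must agree; compare exp(logcdf) with
# -expm1(logsf) so that the identity is checked without losing
# precision in the tails.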
x = np.linspace(-100, -4)
y = stats.gumbel_l.logcdf(x)
z = stats.gumbel_l.logsf(x)
u = np.exp(y)
v = -special.expm1(z)
assert_allclose(u, v)
def test_sf_isf(self):
x = np.linspace(-20, 5)
y = stats.gumbel_l.sf(x)
xx = stats.gumbel_l.isf(y)
assert_allclose(x, xx)
class TestGumbelR:
def test_sf(self):
# Expected value computed with mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 40
# >>> float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
# 1.9287498479639178e-22
assert_allclose(stats.gumbel_r.sf(50), 1.9287498479639178e-22,
rtol=1e-14)
def test_isf(self):
# Expected value computed with mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 40
# >>> float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
# 39.14394658089878
assert_allclose(stats.gumbel_r.isf(1e-17), 39.14394658089878,
rtol=1e-14)
class TestLevyStable:
@pytest.fixture
def nolan_pdf_sample_data(self):
"""Sample data points for pdf computed with Nolan's stablec
See - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
The data table loaded below is generated from Nolan's stablec
with the following parameter space:
alpha = 0.1, 0.2, ..., 2.0
beta = -1.0, -0.9, ..., 1.0
p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
and the equivalent for the right tail
Typical inputs for stablec:
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(
Path(__file__).parent /
'data/levy_stable/stable-Z1-pdf-sample-data.npy'
)
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta,pct')
return data
@pytest.fixture
def nolan_cdf_sample_data(self):
"""Sample data points for cdf computed with Nolan's stablec
See - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
The data table loaded below is generated from Nolan's stablec
with the following parameter space:
alpha = 0.1, 0.2, ..., 2.0
beta = -1.0, -0.9, ..., 1.0
p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
and the equivalent for the right tail
Ideally, Nolan's output for CDF values should match the percentiles
they were sampled from, especially since we extract the percentile x
positions from stablec as well. However, we note that in places
Nolan's stablec produces absolute errors on the order of 1e-5. We
compare against his calculations here. In future, once we are less
reliant on Nolan's paper, we might switch to comparing directly at
percentiles (with those x values produced by some alternative means).
Typical inputs for stablec:
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(
Path(__file__).parent /
'data/levy_stable/stable-Z1-cdf-sample-data.npy'
)
data = np.core.records.fromarrays(data.T, names='x,p,alpha,beta,pct')
return data
@pytest.fixture
def nolan_loc_scale_sample_data(self):
"""Sample data where loc, scale are different from 0, 1
Data extracted in similar way to pdf/cdf above using
Nolan's stablec but set to an arbitrary location scale of
(2, 3) for various important parameters alpha, beta and for
parameterisations S0 and S1.
"""
data = np.load(
Path(__file__).parent /
'data/levy_stable/stable-loc-scale-sample-data.npy'
)
return data
@pytest.mark.parametrize(
"sample_size", [
pytest.param(50), pytest.param(1500, marks=pytest.mark.slow)
]
)
@pytest.mark.parametrize("parameterization", ["S0", "S1"])
@pytest.mark.parametrize(
"alpha,beta", [(1.0, 0), (1.0, -0.5), (1.5, 0), (1.9, 0.5)]
)
@pytest.mark.parametrize("gamma,delta", [(1, 0), (3, 2)])
def test_rvs(
self,
parameterization,
alpha,
beta,
gamma,
delta,
sample_size,
):
stats.levy_stable.parameterization = parameterization
ls = stats.levy_stable(
alpha=alpha, beta=beta, scale=gamma, loc=delta
)
_, p = stats.kstest(
ls.rvs(size=sample_size, random_state=1234), ls.cdf
)
assert p > 0.05
@pytest.mark.slow
@pytest.mark.parametrize('beta', [0.5, 1])
def test_rvs_alpha1(self, beta):
"""Additional test cases for rvs for alpha equal to 1."""
np.random.seed(987654321)
alpha = 1.0
loc = 0.5
scale = 1.5
x = stats.levy_stable.rvs(alpha, beta, loc=loc, scale=scale,
size=5000)
stat, p = stats.kstest(x, 'levy_stable',
args=(alpha, beta, loc, scale))
assert p > 0.01
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
x = [
-.05413, -.05413, 0., 0., 0., 0., .00533, .00533, .00533, .00533,
.00533, .03354, .03354, .03354, .03354, .03354, .05309, .05309,
.05309, .05309, .05309
]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
assert_almost_equal(
loc1, 0.00233, 2
) # to 2 dps due to rounding error in McCulloch86
# cover alpha=2 scenario
x2 = x + [.05309, .05309, .05309, .05309, .05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.xfail(reason="Bug. Possibly fixed by #14476.")
@pytest.mark.parametrize(
"alpha,beta,delta,gamma",
[
(1.5, 0.4, 2, 3),
(1.0, 0.4, 2, 3),
]
)
@pytest.mark.parametrize(
"parametrization", ["S0", "S1"]
)
def test_fit_rvs(self, alpha, beta, delta, gamma, parametrization):
"""Test that fit agrees with rvs for each parametrization."""
stats.levy_stable.parameterization = parametrization
data = stats.levy_stable.rvs(
alpha, beta, loc=delta, scale=gamma, size=10000, random_state=1234
)
fit = stats.levy_stable._fitstart(data)
alpha_obs, beta_obs, delta_obs, gamma_obs = fit
assert_allclose(
[alpha, beta, delta, gamma],
[alpha_obs, beta_obs, delta_obs, gamma_obs],
rtol=0.01,
)
@pytest.mark.parametrize(
"pct_range,alpha_range,beta_range", [
pytest.param(
[.01, .5, .99],
[.1, 1, 2],
[-1, 0, .8],
),
pytest.param(
[.01, .05, .5, .95, .99],
[.1, .5, 1, 1.5, 2],
[-.9, -.5, 0, .3, .6, 1],
marks=pytest.mark.slow
),
pytest.param(
[.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
np.linspace(0.1, 2, 20),
np.linspace(-1, 1, 21),
marks=pytest.mark.xslow,
),
]
)
def test_pdf_nolan_samples(
self, nolan_pdf_sample_data, pct_range, alpha_range, beta_range
):
"""Test pdf values against Nolan's stablec.exe output"""
data = nolan_pdf_sample_data
# some tests break on linux 32 bit
uname = platform.uname()
is_linux_32 = uname.system == 'Linux' and uname.machine == 'i686'
platform_desc = "/".join(
[uname.system, uname.machine, uname.processor])
# fmt: off
# There are a number of cases which fail on some but not all platforms.
# These are excluded by the filters below. TODO: Rewrite tests so that
# the now filtered out test cases are still run but marked in pytest as
# expected to fail.
tests = [
[
'dni', 1e-7, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
~(
(
(r['beta'] == 0) &
(r['pct'] == 0.5)
) |
(
(r['beta'] >= 0.9) &
(r['alpha'] >= 1.6) &
(r['pct'] == 0.5)
) |
(
(r['alpha'] <= 0.4) &
np.isin(r['pct'], [.01, .99])
) |
(
(r['alpha'] <= 0.3) &
np.isin(r['pct'], [.05, .95])
) |
(
(r['alpha'] <= 0.2) &
np.isin(r['pct'], [.1, .9])
) |
(
(r['alpha'] == 0.1) &
np.isin(r['pct'], [.25, .75]) &
np.isin(np.abs(r['beta']), [.5, .6, .7])
) |
(
(r['alpha'] == 0.1) &
np.isin(r['pct'], [.5]) &
np.isin(np.abs(r['beta']), [.1])
) |
(
(r['alpha'] == 0.1) &
np.isin(r['pct'], [.35, .65]) &
np.isin(np.abs(r['beta']), [-.4, -.3, .3, .4, .5])
) |
(
(r['alpha'] == 0.2) &
(r['beta'] == 0.5) &
(r['pct'] == 0.25)
) |
(
(r['alpha'] == 0.2) &
(r['beta'] == -0.3) &
(r['pct'] == 0.65)
) |
(
(r['alpha'] == 0.2) &
(r['beta'] == 0.3) &
(r['pct'] == 0.35)
) |
(
(r['alpha'] == 1.) &
np.isin(r['pct'], [.5]) &
np.isin(np.abs(r['beta']), [.1, .2, .3, .4])
) |
(
(r['alpha'] == 1.) &
np.isin(r['pct'], [.35, .65]) &
np.isin(np.abs(r['beta']), [.8, .9, 1.])
) |
(
(r['alpha'] == 1.) &
np.isin(r['pct'], [.01, .99]) &
np.isin(np.abs(r['beta']), [-.1, .1])
) |
# various points ok but too sparse to list
(r['alpha'] >= 1.1)
)
)
],
# piecewise generally good accuracy
[
'piecewise', 1e-11, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 0.2) &
(r['alpha'] != 1.)
)
],
# for alpha = 1., on linux 32 bit, optimize.bisect
# has some issues for the .01 and .99 percentiles
[
'piecewise', 1e-11, lambda r: (
(r['alpha'] == 1.) &
(not is_linux_32) &
np.isin(r['pct'], pct_range) &
(1. in alpha_range) &
np.isin(r['beta'], beta_range)
)
],
# for small alpha very slightly reduced accuracy
[
'piecewise', 5e-11, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] <= 0.2)
)
],
# fft accuracy reduces as alpha decreases
[
'fft-simpson', 1e-5, lambda r: (
(r['alpha'] >= 1.9) &
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range)
),
],
[
'fft-simpson', 1e-6, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1) &
(r['alpha'] < 1.9)
)
],
# fft relative errors for alpha < 1, will raise if enabled
# ['fft-simpson', 1e-4, lambda r: r['alpha'] == 0.9],
# ['fft-simpson', 1e-3, lambda r: r['alpha'] == 0.8],
# ['fft-simpson', 1e-2, lambda r: r['alpha'] == 0.7],
# ['fft-simpson', 1e-1, lambda r: r['alpha'] == 0.6],
]
# fmt: on
for ix, (default_method, rtol,
filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
subdata = data[filter_func(data)
] if filter_func is not None else data
with suppress_warnings() as sup:
# occurs in FFT methods only
sup.record(
RuntimeWarning,
"Density calculations experimental for FFT method.*"
)
p = stats.levy_stable.pdf(
subdata['x'],
subdata['alpha'],
subdata['beta'],
scale=1,
loc=0
)
with np.errstate(over="ignore"):
subdata2 = rec_append_fields(
subdata,
['calc', 'abserr', 'relerr'],
[
p,
np.abs(p - subdata['p']),
np.abs(p - subdata['p']) / np.abs(subdata['p'])
]
)
failures = subdata2[
(subdata2['relerr'] >= rtol) |
np.isnan(p)
]
assert_allclose(
p,
subdata['p'],
rtol,
err_msg="pdf test %s failed with method '%s'"
" [platform: %s]\n%s\n%s" %
(ix, default_method, platform_desc, failures.dtype.names,
failures),
verbose=False
)
@pytest.mark.parametrize(
"pct_range,alpha_range,beta_range", [
pytest.param(
[.01, .5, .99],
[.1, 1, 2],
[-1, 0, .8],
),
pytest.param(
[.01, .05, .5, .95, .99],
[.1, .5, 1, 1.5, 2],
[-.9, -.5, 0, .3, .6, 1],
marks=pytest.mark.slow
),
pytest.param(
[.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
np.linspace(0.1, 2, 20),
np.linspace(-1, 1, 21),
marks=pytest.mark.xslow,
),
]
)
def test_cdf_nolan_samples(
self, nolan_cdf_sample_data, pct_range, alpha_range, beta_range
):
""" Test cdf values against Nolan's stablec.exe output."""
data = nolan_cdf_sample_data
tests = [
# piecewise generally good accuracy
[
'piecewise', 1e-12, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
~(
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
(r['pct'] == 0.01)
) |
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [0.1, 0.2, 0.3]) &
(r['pct'] == 0.99)
)
)
)
],
# for some points with alpha=1, Nolan's STABLE clearly
# loses accuracy
[
'piecewise', 5e-2, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
(r['pct'] == 0.01)
) |
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [0.1, 0.2, 0.3]) &
(r['pct'] == 0.99)
)
)
],
# fft accuracy is poor, and very poor for alpha < 1
[
'fft-simpson', 1e-5, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.7)
)
],
[
'fft-simpson', 1e-4, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.5) &
(r['alpha'] <= 1.7)
)
],
[
'fft-simpson', 1e-3, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.3) &
(r['alpha'] <= 1.5)
)
],
[
'fft-simpson', 1e-2, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.0) &
(r['alpha'] <= 1.3)
)
],
]
for ix, (default_method, rtol,
filter_func) in enumerate(tests):
stats.levy_stable.cdf_default_method = default_method
subdata = data[filter_func(data)
] if filter_func is not None else data
with suppress_warnings() as sup:
sup.record(
RuntimeWarning,
'Cumulative density calculations experimental for FFT'
+ ' method. Use piecewise method instead.*'
)
p = stats.levy_stable.cdf(
subdata['x'],
subdata['alpha'],
subdata['beta'],
scale=1,
loc=0
)
with np.errstate(over="ignore"):
subdata2 = rec_append_fields(
subdata,
['calc', 'abserr', 'relerr'],
[
p,
np.abs(p - subdata['p']),
np.abs(p - subdata['p']) / np.abs(subdata['p'])
]
)
failures = subdata2[
(subdata2['relerr'] >= rtol) |
np.isnan(p)
]
assert_allclose(
p,
subdata['p'],
rtol,
err_msg="cdf test %s failed with method '%s'\n%s\n%s" %
(ix, default_method, failures.dtype.names, failures),
verbose=False
)
@pytest.mark.parametrize("param", [0, 1])
@pytest.mark.parametrize("case", ["pdf", "cdf"])
def test_location_scale(
self, nolan_loc_scale_sample_data, param, case
):
"""Tests for pdf and cdf where loc, scale are different from 0, 1
"""
data = nolan_loc_scale_sample_data
# We only test against piecewise as the location/scale transforms
# are the same for the other methods.
stats.levy_stable.cdf_default_method = "piecewise"
stats.levy_stable.pdf_default_method = "piecewise"
subdata = data[data["param"] == param]
stats.levy_stable.parameterization = f"S{param}"
assert case in ["pdf", "cdf"]
function = (
stats.levy_stable.pdf if case == "pdf" else stats.levy_stable.cdf
)
v1 = function(
subdata['x'], subdata['alpha'], subdata['beta'], scale=2, loc=3
)
assert_allclose(v1, subdata[case], 1e-5)
@pytest.mark.parametrize(
"method,decimal_places",
[
['dni', 4],
['piecewise', 4],
]
)
def test_pdf_alpha_equals_one_beta_non_zero(self, method, decimal_places):
""" sample points extracted from Tables and Graphs of Stable
Probability Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
)
density = np.array(
[
.3183, .3096, .2925, .2622, .1591, .1587, .1599, .1635, .0637,
.0729, .0812, .0955, .0318, .0390, .0458, .0586, .0187, .0236,
.0285, .0384
]
)
betas = np.array(
[
0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0,
.25, .5, 1
]
)
with np.errstate(all='ignore'), suppress_warnings() as sup:
sup.filter(
category=RuntimeWarning,
message="Density calculation unstable.*"
)
stats.levy_stable.pdf_default_method = method
# stats.levy_stable.fft_grid_spacing = 0.0001
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(
pdf, density, decimal_places, method
)
@pytest.mark.parametrize(
"params,expected",
[
[(1.48, -.22, 0, 1), (0, np.inf, np.NaN, np.NaN)],
[(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
]
)
def test_stats(self, params, expected):
observed = stats.levy_stable.stats(
params[0], params[1], loc=params[2], scale=params[3],
moments='mvsk'
)
assert_almost_equal(observed, expected)
@pytest.mark.parametrize('alpha', [0.25, 0.5, 0.75])
@pytest.mark.parametrize(
'function,beta,points,expected',
[
(
stats.levy_stable.cdf,
1.0,
np.linspace(-25, 0, 10),
0.0,
),
(
stats.levy_stable.pdf,
1.0,
np.linspace(-25, 0, 10),
0.0,
),
(
stats.levy_stable.cdf,
-1.0,
np.linspace(0, 25, 10),
1.0,
),
(
stats.levy_stable.pdf,
-1.0,
np.linspace(0, 25, 10),
0.0,
)
]
)
def test_distribution_outside_support(
self, alpha, function, beta, points, expected
):
"""Ensure the pdf/cdf routines do not return nan outside support.
This distribution's support becomes truncated in a few special cases:
support is [mu, infty) if alpha < 1 and beta = 1
support is (-infty, mu] if alpha < 1 and beta = -1
Otherwise, the support is all reals. Here, mu is zero by default.
"""
assert 0 < alpha < 1
assert_almost_equal(
function(points, alpha=alpha, beta=beta),
np.full(len(points), expected)
)
class TestArrayArgument: # test for ticket:992
def setup_method(self):
np.random.seed(1234)
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring:
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
assert_("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
assert_("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
def test_argsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod:
skip = ['ncf', 'ksone', 'kstwo']
def setup_method(self):
np.random.seed(1234)
# skip these b/c deprecated, or only loc and scale arguments
fitSkipNonFinite = ['expon', 'norm', 'uniform']
@pytest.mark.parametrize('dist,args', distcont)
def test_fit_w_non_finite_data_values(self, dist, args):
"""gh-10300"""
if dist in self.fitSkipNonFinite:
pytest.skip("%s fit known to fail or deprecated" % dist)
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
distfunc = getattr(stats, dist)
assert_raises(RuntimeError, distfunc.fit, x, floc=0, fscale=1)
assert_raises(RuntimeError, distfunc.fit, y, floc=0, fscale=1)
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[expected_shape, 0, 20], atol=1e-8)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
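# With loc fixed at 0, the shape MLE a solves the standard gamma
# likelihood equation (noted here for context, not part of the
# original test):
#     log(a) - digamma(a) = log(mean(x)) - mean(log(x))
# and the scale MLE is mean(x)/a, which the assertions below check.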
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_expon_fit(self):
x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
loc, scale = stats.expon.fit(x)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 2) # x.mean() - x.min()
loc, scale = stats.expon.fit(x, fscale=3)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 3) # fscale
loc, scale = stats.expon.fit(x, floc=0)
assert_equal(loc, 0) # floc
assert_equal(scale, 4) # x.mean() - loc
def test_lognorm_fit(self):
x = np.array([1.5, 3, 10, 15, 23, 59])
lnxm1 = np.log(x - 1)
shape, loc, scale = stats.lognorm.fit(x, floc=1)
assert_allclose(shape, lnxm1.std(), rtol=1e-12)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
rtol=1e-12)
assert_equal(loc, 1)
assert_equal(scale, 6)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
assert_equal(shape, 0.75)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
def test_uniform_fit(self):
x = np.array([1.0, 1.1, 1.2, 9.0])
loc, scale = stats.uniform.fit(x)
assert_equal(loc, x.min())
assert_equal(scale, x.ptp())
loc, scale = stats.uniform.fit(x, floc=0)
assert_equal(loc, 0)
assert_equal(scale, x.max())
loc, scale = stats.uniform.fit(x, fscale=10)
assert_equal(loc, 0)
assert_equal(scale, 10)
assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
@pytest.mark.parametrize("method", ["MLE", "MM"])
def test_fshapes(self, method):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3., method=method)
res_2 = stats.beta.fit(x, fa=3., method=method)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3., method=method)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4., method=method)
res_4 = stats.beta.fit(x, fb=4., method=method)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4., method=method)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2, method=method)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3, method=method)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1, method=method)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a, method=method)
assert_equal(aa, a)
@pytest.mark.parametrize("method", ["MLE", "MM"])
def test_extra_params(self, method):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct, method=method)
class TestFrozen:
def setup_method(self):
np.random.seed(1234)
# Test that a frozen distribution gives the same results as the original
# object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c=c)
assert_equal(rv.dist._get_support(c), [0, np.inf])
c = -0.1
rv = stats.genpareto(c=c)
a, b = rv.dist._get_support(c)
assert_equal([a, b], [0., 10.])
c = 0.1
stats.genpareto.pdf(0, c) # this should NOT change genpareto.b
assert_equal((rv.dist.a, rv.dist.b), stats.genpareto._get_support(c))
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
rv = stats.genpareto(c=c)
a, b = rv.a, rv.b
assert_equal(a, 0.)
assert_(np.isposinf(b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
a, b = stats.genpareto._get_support(c)
assert_allclose([a, b], [0., 0.5])
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_pickling(self):
# test that a frozen instance pickles and unpickles
# (this method is a clone of common_tests.check_pickling)
beta = stats.beta(2.3098496451481823, 0.62687954300963677)
poiss = stats.poisson(3.)
sample = stats.rv_discrete(values=([0, 1, 2, 3],
[0.1, 0.2, 0.3, 0.4]))
for distfn in [beta, poiss, sample]:
distfn.random_state = 1234
distfn.rvs(size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(size=8)
assert_equal(r0, r1)
# also smoke test some methods
medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
assert_equal(medians[0], medians[1])
assert_equal(distfn.cdf(medians[0]),
unpickled.cdf(medians[1]))
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect:
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
# rice.pdf(999, 0.74) was inf since special.i0 silently overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
def test_skellam(self):
# Use a discrete distribution w/ bi-infinite support. Compute the first two
# moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct:
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values of c and for c=0, the results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=0.9, nc=0.3, moments='mvsk')
assert_equal([m, v, s, k], [np.nan, np.nan, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.nan, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
def test_nct_stats_large_df_values(self):
# previously gamma function was used which lost precision at df=345
# cf. https://github.com/scipy/scipy/issues/12919 for details
nct_mean_df_1000 = stats.nct.mean(1000, 2)
nct_stats_df_1000 = stats.nct.stats(1000, 2)
# These expected values were computed with mpmath. They were also
# verified with the Wolfram Alpha expressions:
# Mean[NoncentralStudentTDistribution[1000, 2]]
# Var[NoncentralStudentTDistribution[1000, 2]]
expected_stats_df_1000 = [2.0015015641422464, 1.0040115288163005]
assert_allclose(nct_mean_df_1000, expected_stats_df_1000[0],
rtol=1e-10)
assert_allclose(nct_stats_df_1000, expected_stats_df_1000,
rtol=1e-10)
# and a bigger df value
nct_mean = stats.nct.mean(100000, 2)
nct_stats = stats.nct.stats(100000, 2)
# These expected values were computed with mpmath.
expected_stats = [2.0000150001562518, 1.0000400011500288]
assert_allclose(nct_mean, expected_stats[0], rtol=1e-10)
assert_allclose(nct_stats, expected_stats, rtol=1e-9)
class TestRecipInvGauss:
def test_pdf_endpoint(self):
p = stats.recipinvgauss.pdf(0, 0.6)
assert p == 0.0
def test_logpdf_endpoint(self):
logp = stats.recipinvgauss.logpdf(0, 0.6)
assert logp == -np.inf
def test_cdf_small_x(self):
# The expected value was computed with mpmath:
#
# import mpmath
#
# mpmath.mp.dps = 100
#
# def recipinvgauss_cdf_mp(x, mu):
# x = mpmath.mpf(x)
# mu = mpmath.mpf(mu)
# trm1 = 1/mu - x
# trm2 = 1/mu + x
# isqx = 1/mpmath.sqrt(x)
# return (mpmath.ncdf(-isqx*trm1)
# - mpmath.exp(2/mu)*mpmath.ncdf(-isqx*trm2))
#
p = stats.recipinvgauss.cdf(0.05, 0.5)
expected = 6.590396159501331e-20
assert_allclose(p, expected, rtol=1e-14)
def test_sf_large_x(self):
# The expected value was computed with mpmath; see test_cdf_small_x.
p = stats.recipinvgauss.sf(80, 0.5)
expected = 2.699819200556787e-18
assert_allclose(p, expected, 5e-15)
class TestRice:
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
# furthermore, pdf is continuous as b\to 0
# rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
# see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10; an explicit form of the
# density is sketched just after this class
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
def test_rice_gh9836(self):
# test that gh-9836 is resolved; previously the cdf jumped to 1 at the end
cdf = stats.rice.cdf(np.arange(10, 160, 10), np.arange(10, 160, 10))
# Generated in R
# library(VGAM)
# options(digits=16)
# x = seq(10, 150, 10)
# print(price(x, sigma=1, vee=x))
cdf_exp = [0.4800278103504522, 0.4900233218590353, 0.4933500379379548,
0.4950128317658719, 0.4960103776798502, 0.4966753655438764,
0.4971503395812474, 0.4975065620443196, 0.4977836197921638,
0.4980052636649550, 0.4981866072661382, 0.4983377260666599,
0.4984655952615694, 0.4985751970541413, 0.4986701850071265]
assert_allclose(cdf, cdf_exp)
probabilities = np.arange(0.1, 1, 0.1)
ppf = stats.rice.ppf(probabilities, 500/4, scale=4)
# Generated in R
# library(VGAM)
# options(digits=16)
# p = seq(0.1, .9, by = .1)
# print(qrice(p, vee = 500, sigma = 4))
ppf_exp = [494.8898762347361, 496.6495690858350, 497.9184315188069,
499.0026277378915, 500.0159999146250, 501.0293721352668,
502.1135684981884, 503.3824312270405, 505.1421247157822]
assert_allclose(ppf, ppf_exp)
ppf = stats.rice.ppf(0.5, np.arange(10, 150, 10))
# Generated in R
# library(VGAM)
# options(digits=16)
# b <- seq(10, 140, 10)
# print(qrice(0.5, vee = b, sigma = 1))
ppf_exp = [10.04995862522287, 20.02499480078302, 30.01666512465732,
40.01249934924363, 50.00999966676032, 60.00833314046875,
70.00714273568241, 80.00624991862573, 90.00555549840364,
100.00499995833597, 110.00454542324384, 120.00416664255323,
130.00384613488120, 140.00357141338748]
assert_allclose(ppf, ppf_exp)
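# test_rice_zero_b above relies on the small-b behaviour of the Rice density,
# pdf(x, b) = x*exp(-(x**2 + b**2)/2)*I0(x*b) -> x*exp(-x**2/2) + O(b**2).
# Below is a minimal sketch of that density written out with special.i0
# (illustrative only; the helper name is ours and it is not used by the tests).
def _rice_pdf_explicit(x, b):
    # standard Rice density for x >= 0; I0 is the modified Bessel function
    return x * np.exp(-(x**2 + b**2) / 2) * special.i0(x * b)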
class TestErlang:
def setup_method(self):
np.random.seed(1234)
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
def test_gh_pr_10949_argcheck(self):
assert_equal(stats.erlang.pdf(0.5, a=[1, -1]),
stats.gamma.pdf(0.5, a=[1, -1]))
class TestRayleigh:
def setup_method(self):
np.random.seed(987654321)
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
@pytest.mark.parametrize("rvs_loc,rvs_scale", [np.random.rand(2)])
def test_fit(self, rvs_loc, rvs_scale):
data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
def scale_mle(data, floc):
return (np.sum((data - floc) ** 2) / (2 * len(data))) ** .5
# when `floc` is provided, `scale` is found with an analytical formula
scale_expect = scale_mle(data, rvs_loc)
loc, scale = stats.rayleigh.fit(data, floc=rvs_loc)
assert_equal(loc, rvs_loc)
assert_equal(scale, scale_expect)
# when `fscale` is fixed, superclass fit is used to determine `loc`.
loc, scale = stats.rayleigh.fit(data, fscale=.6)
assert_equal(scale, .6)
# with both parameters free, one dimensional optimization is done
# over a new function that takes into account the dependent relation
# of `scale` to `loc`.
loc, scale = stats.rayleigh.fit(data)
# test that `scale` is defined by its relation to `loc`
assert_equal(scale, scale_mle(data, loc))
@pytest.mark.parametrize("rvs_loc,rvs_scale", [[0.74, 0.01],
np.random.rand(2)])
def test_fit_comparison_super_method(self, rvs_loc, rvs_scale):
# test that the objective function result of the analytical MLEs is
# less than or equal to that of the numerically optimized estimate
data = stats.rayleigh.rvs(size=250, loc=rvs_loc, scale=rvs_scale)
# obtain objective function with same method as `rv_continuous.fit`
args = [data, (stats.rayleigh._fitstart(data), )]
func = stats.rayleigh._reduce_func(args, {})[1]
_assert_less_or_close_loglike(stats.rayleigh, data, func)
def test_fit_warnings(self):
assert_fit_warnings(stats.rayleigh)
class TestExponWeib:
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestFatigueLife:
def test_sf_tail(self):
# Expected value computed with mpmath:
# import mpmath
# mpmath.mp.dps = 80
# x = mpmath.mpf(800.0)
# c = mpmath.mpf(2.5)
# s = float(1 - mpmath.ncdf(1/c * (mpmath.sqrt(x)
# - 1/mpmath.sqrt(x))))
# print(s)
# Output:
# 6.593376447038406e-30
s = stats.fatiguelife.sf(800.0, 2.5)
assert_allclose(s, 6.593376447038406e-30, rtol=1e-13)
def test_isf_tail(self):
# See test_sf_tail for the mpmath code.
p = 6.593376447038406e-30
q = stats.fatiguelife.isf(p, 2.5)
assert_allclose(q, 800.0, rtol=1e-13)
class TestWeibull:
def test_logpdf(self):
# gh-6217
y = stats.weibull_min.logpdf(0, 1)
assert_equal(y, 0)
def test_with_maxima_distrib(self):
# Tests for weibull_min and weibull_max.
# The expected values were computed using the symbolic algebra
# program 'maxima' with the package 'distrib', which has
# 'pdf_weibull' and 'cdf_weibull'. The mapping between the
# scipy and maxima functions is as follows:
# -----------------------------------------------------------------
# scipy maxima
# --------------------------------- ------------------------------
# weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)
# weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))
# weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)
# weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))
# weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)
# weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))
#
# weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)
# weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))
# weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)
# weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))
# weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)
# weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))
# -----------------------------------------------------------------
x = 1.5
a = 2.0
b = 3.0
# weibull_min
p = stats.weibull_min.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_min.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_min.cdf(x, a, scale=b)
assert_allclose(c, -special.expm1(-0.25))
lc = stats.weibull_min.logcdf(x, a, scale=b)
assert_allclose(lc, np.log(-special.expm1(-0.25)))
s = stats.weibull_min.sf(x, a, scale=b)
assert_allclose(s, np.exp(-0.25))
ls = stats.weibull_min.logsf(x, a, scale=b)
assert_allclose(ls, -0.25)
# Also test using a large value x, for which computing the survival
# function using the CDF would result in 0.
s = stats.weibull_min.sf(30, 2, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.weibull_min.logsf(30, 2, scale=3)
assert_allclose(ls, -100)
# weibull_max
x = -1.5
p = stats.weibull_max.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_max.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_max.cdf(x, a, scale=b)
assert_allclose(c, np.exp(-0.25))
lc = stats.weibull_max.logcdf(x, a, scale=b)
assert_allclose(lc, -0.25)
s = stats.weibull_max.sf(x, a, scale=b)
assert_allclose(s, -special.expm1(-0.25))
ls = stats.weibull_max.logsf(x, a, scale=b)
assert_allclose(ls, np.log(-special.expm1(-0.25)))
# Also test using a value of x close to 0, for which computing the
# survival function using the CDF would result in 0.
s = stats.weibull_max.sf(-1e-9, 2, scale=3)
assert_allclose(s, -special.expm1(-1/9000000000000000000))
ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist:
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
def test_rdist_beta(self):
# rdist is a special case of stats.beta
x = np.linspace(-0.99, 0.99, 10)
c = 2.7
assert_almost_equal(0.5*stats.beta(c/2, c/2).pdf((x + 1)/2),
stats.rdist(c).pdf(x))
class TestTrapezoid:
def test_reduces_to_triang(self):
modes = [0, 0.3, 0.5, 1]
for mode in modes:
x = [0, mode, 1]
assert_almost_equal(stats.trapezoid.pdf(x, mode, mode),
stats.triang.pdf(x, mode))
assert_almost_equal(stats.trapezoid.cdf(x, mode, mode),
stats.triang.cdf(x, mode))
def test_reduces_to_uniform(self):
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapezoid.pdf(x, 0, 1), stats.uniform.pdf(x))
assert_almost_equal(stats.trapezoid.cdf(x, 0, 1), stats.uniform.cdf(x))
def test_cases(self):
# edge cases
assert_almost_equal(stats.trapezoid.pdf(0, 0, 0), 2)
assert_almost_equal(stats.trapezoid.pdf(1, 1, 1), 2)
assert_almost_equal(stats.trapezoid.pdf(0.5, 0, 0.8),
1.11111111111111111)
assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 1.0),
1.11111111111111111)
# straightforward case
assert_almost_equal(stats.trapezoid.pdf(0.1, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapezoid.pdf(0.5, 0.2, 0.8), 1.25)
assert_almost_equal(stats.trapezoid.pdf(0.9, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapezoid.cdf(0.1, 0.2, 0.8), 0.03125)
assert_almost_equal(stats.trapezoid.cdf(0.2, 0.2, 0.8), 0.125)
assert_almost_equal(stats.trapezoid.cdf(0.5, 0.2, 0.8), 0.5)
assert_almost_equal(stats.trapezoid.cdf(0.9, 0.2, 0.8), 0.96875)
assert_almost_equal(stats.trapezoid.cdf(1.0, 0.2, 0.8), 1.0)
def test_moments_and_entropy(self):
# issue #11795: improve precision of trapezoid stats
# Apply formulas from Wikipedia for the following parameters:
a, b, c, d = -3, -1, 2, 3 # => 1/3, 5/6, -3, 6
p1, p2, loc, scale = (b-a) / (d-a), (c-a) / (d-a), a, d-a
h = 2 / (d+c-b-a)
def moment(n):
return (h * ((d**(n+2) - c**(n+2)) / (d-c)
- (b**(n+2) - a**(n+2)) / (b-a)) /
(n+1) / (n+2))
mean = moment(1)
var = moment(2) - mean**2
entropy = 0.5 * (d-c+b-a) / (d+c-b-a) + np.log(0.5 * (d+c-b-a))
assert_almost_equal(stats.trapezoid.mean(p1, p2, loc, scale),
mean, decimal=13)
assert_almost_equal(stats.trapezoid.var(p1, p2, loc, scale),
var, decimal=13)
assert_almost_equal(stats.trapezoid.entropy(p1, p2, loc, scale),
entropy, decimal=13)
# Check boundary cases where the scipy shape parameter d is 0 or 1.
assert_almost_equal(stats.trapezoid.mean(0, 0, -3, 6), -1, decimal=13)
assert_almost_equal(stats.trapezoid.mean(0, 1, -3, 6), 0, decimal=13)
assert_almost_equal(stats.trapezoid.var(0, 1, -3, 6), 3, decimal=13)
def test_trapezoid_vect(self):
# test that array-valued shapes and arguments are handled
c = np.array([0.1, 0.2, 0.3])
d = np.array([0.5, 0.6])[:, None]
x = np.array([0.15, 0.25, 0.9])
v = stats.trapezoid.pdf(x, c, d)
cc, dd, xx = np.broadcast_arrays(c, d, x)
res = np.empty(xx.size, dtype=xx.dtype)
ind = np.arange(xx.size)
for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
res[i] = stats.trapezoid.pdf(x1, c1, d1)
assert_allclose(v, res.reshape(v.shape), atol=1e-15)
# Check that the stats() method supports vector arguments.
v = np.asarray(stats.trapezoid.stats(c, d, moments="mvsk"))
cc, dd = np.broadcast_arrays(c, d)
res = np.empty((cc.size, 4)) # 4 stats returned per value
ind = np.arange(cc.size)
for i, c1, d1 in zip(ind, cc.ravel(), dd.ravel()):
res[i] = stats.trapezoid.stats(c1, d1, moments="mvsk")
assert_allclose(v, res.T.reshape(v.shape), atol=1e-15)
def test_trapz(self):
# Basic test for alias
x = np.linspace(0, 1, 10)
assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x))
class TestTriang:
def test_edge_cases(self):
with np.errstate(all='raise'):
assert_equal(stats.triang.pdf(0, 0), 2.)
assert_equal(stats.triang.pdf(0.5, 0), 1.)
assert_equal(stats.triang.pdf(1, 0), 0.)
assert_equal(stats.triang.pdf(0, 1), 0)
assert_equal(stats.triang.pdf(0.5, 1), 1.)
assert_equal(stats.triang.pdf(1, 1), 2)
assert_equal(stats.triang.cdf(0., 0.), 0.)
assert_equal(stats.triang.cdf(0.5, 0.), 0.75)
assert_equal(stats.triang.cdf(1.0, 0.), 1.0)
assert_equal(stats.triang.cdf(0., 1.), 0.)
assert_equal(stats.triang.cdf(0.5, 1.), 0.25)
assert_equal(stats.triang.cdf(1., 1.), 1)
class TestMielke:
def test_moments(self):
k, s = 4.642, 0.597
# n-th moment exists only if n < s
assert_equal(stats.mielke(k, s).moment(1), np.inf)
assert_equal(stats.mielke(k, 1.0).moment(1), np.inf)
assert_(np.isfinite(stats.mielke(k, 1.01).moment(1)))
def test_burr_equivalence(self):
x = np.linspace(0.01, 100, 50)
k, s = 2.45, 5.32
assert_allclose(stats.burr.pdf(x, s, k/s), stats.mielke.pdf(x, k, s))
class TestBurr:
def test_endpoints_7491(self):
# gh-7491
# Compute the pdf at the left endpoint of the support, dist.a.
data = [
[stats.fisk, (1,), 1],
[stats.burr, (0.5, 2), 1],
[stats.burr, (1, 1), 1],
[stats.burr, (2, 0.5), 1],
[stats.burr12, (1, 0.5), 0.5],
[stats.burr12, (1, 1), 1.0],
[stats.burr12, (1, 2), 2.0]]
ans = [_f.pdf(_f.a, *_args) for _f, _args, _ in data]
correct = [_correct_ for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
ans = [_f.logpdf(_f.a, *_args) for _f, _args, _ in data]
correct = [np.log(_correct_) for _f, _args, _correct_ in data]
assert_array_almost_equal(ans, correct)
def test_burr_stats_9544(self):
# gh-9544. Test from gh-9978
c, d = 5.0, 3
mean, variance = stats.burr(c, d).stats()
# mean = sc.beta(3 + 1/5, 1. - 1/5) * 3 = 1.4110263...
# var = sc.beta(3 + 2 / 5, 1. - 2 / 5) * 3 -
# (sc.beta(3 + 1 / 5, 1. - 1 / 5) * 3) ** 2
mean_hc, variance_hc = 1.4110263183925857, 0.22879948026191643
assert_allclose(mean, mean_hc)
assert_allclose(variance, variance_hc)
def test_burr_nan_mean_var_9544(self):
# gh-9544. Test from gh-9978
c, d = 0.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isnan(mean))
assert_(np.isnan(variance))
c, d = 1.5, 3
mean, variance = stats.burr(c, d).stats()
assert_(np.isfinite(mean))
assert_(np.isnan(variance))
c, d = 0.5, 3
e1, e2, e3, e4 = stats.burr._munp(np.array([1, 2, 3, 4]), c, d)
assert_(np.isnan(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 1.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isnan(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 2.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isnan(e3))
assert_(np.isnan(e4))
c, d = 3.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isnan(e4))
c, d = 4.5, 3
e1, e2, e3, e4 = stats.burr._munp([1, 2, 3, 4], c, d)
assert_(np.isfinite(e1))
assert_(np.isfinite(e2))
assert_(np.isfinite(e3))
assert_(np.isfinite(e4))
class TestStudentizedRange:
# For alpha = .05, .01, and .001, and for each value of
# v = [1, 3, 10, 20, 120, inf], a Q was picked from each table for
# k = [2, 8, 14, 20].
# These arrays are written with `k` as columns and `v` as rows.
# Q values are taken from table 3:
# https://www.jstor.org/stable/2237810
q05 = [17.97, 45.40, 54.33, 59.56,
4.501, 8.853, 10.35, 11.24,
3.151, 5.305, 6.028, 6.467,
2.950, 4.768, 5.357, 5.714,
2.800, 4.363, 4.842, 5.126,
2.772, 4.286, 4.743, 5.012]
q01 = [90.03, 227.2, 271.8, 298.0,
8.261, 15.64, 18.22, 19.77,
4.482, 6.875, 7.712, 8.226,
4.024, 5.839, 6.450, 6.823,
3.702, 5.118, 5.562, 5.827,
3.643, 4.987, 5.400, 5.645]
q001 = [900.3, 2272, 2718, 2980,
18.28, 34.12, 39.69, 43.05,
6.487, 9.352, 10.39, 11.03,
5.444, 7.313, 7.966, 8.370,
4.772, 6.039, 6.448, 6.695,
4.654, 5.823, 6.191, 6.411]
qs = np.concatenate((q05, q01, q001))
ps = [.95, .99, .999]
vs = [1, 3, 10, 20, 120, np.inf]
ks = [2, 8, 14, 20]
# materialize so that more than one test method can iterate over the cases
data = list(zip(product(ps, vs, ks), qs))
# A small selection of large-v cases generated with R's `ptukey`
# Each case is in the format (q, k, v, r_result)
r_data = [
(0.1, 3, 9001, 0.002752818526842),
(1, 10, 1000, 0.000526142388912),
(1, 3, np.inf, 0.240712641229283),
(4, 3, np.inf, 0.987012338626815),
(1, 10, np.inf, 0.000519869467083),
]
def test_cdf_against_tables(self):
for pvk, q in self.data:
p_expected, v, k = pvk
res_p = stats.studentized_range.cdf(q, k, v)
assert_allclose(res_p, p_expected, rtol=1e-4)
@pytest.mark.slow
def test_ppf_against_tables(self):
for pvk, q_expected in self.data:
res_q = stats.studentized_range.ppf(*pvk)
assert_allclose(res_q, q_expected, rtol=1e-4)
path_prefix = os.path.dirname(__file__)
relative_path = "data/studentized_range_mpmath_ref.json"
with open(os.path.join(path_prefix, relative_path), "r") as file:
pregenerated_data = json.load(file)
@pytest.mark.parametrize("case_result", pregenerated_data["cdf_data"])
def test_cdf_against_mp(self, case_result):
src_case = case_result["src_case"]
mp_result = case_result["mp_result"]
qkv = src_case["q"], src_case["k"], src_case["v"]
res = stats.studentized_range.cdf(*qkv)
assert_allclose(res, mp_result,
atol=src_case["expected_atol"],
rtol=src_case["expected_rtol"])
@pytest.mark.parametrize("case_result", pregenerated_data["pdf_data"])
def test_pdf_against_mp(self, case_result):
src_case = case_result["src_case"]
mp_result = case_result["mp_result"]
qkv = src_case["q"], src_case["k"], src_case["v"]
res = stats.studentized_range.pdf(*qkv)
assert_allclose(res, mp_result,
atol=src_case["expected_atol"],
rtol=src_case["expected_rtol"])
@pytest.mark.slow
@pytest.mark.parametrize("case_result", pregenerated_data["moment_data"])
def test_moment_against_mp(self, case_result):
src_case = case_result["src_case"]
mp_result = case_result["mp_result"]
mkv = src_case["m"], src_case["k"], src_case["v"]
res = stats.studentized_range.moment(*mkv)
assert_allclose(res, mp_result,
atol=src_case["expected_atol"],
rtol=src_case["expected_rtol"])
def test_pdf_integration(self):
k, v = 3, 10
# Test whether PDF integration is 1 like it should be.
res = quad(stats.studentized_range.pdf, 0, np.inf, args=(k, v))
assert_allclose(res[0], 1)
@pytest.mark.xslow
def test_pdf_against_cdf(self):
k, v = 3, 10
# Test whether the integrated PDF matches the CDF using cumulative
# integration. Use a small step size to reduce error due to the
# summation. This is slow, but tests the results well.
x = np.arange(0, 10, step=0.01)
y_cdf = stats.studentized_range.cdf(x, k, v)[1:]
y_pdf_raw = stats.studentized_range.pdf(x, k, v)
y_pdf_cumulative = cumulative_trapezoid(y_pdf_raw, x)
# Because of error caused by the summation, use a relatively large rtol
assert_allclose(y_pdf_cumulative, y_cdf, rtol=1e-4)
@pytest.mark.parametrize("r_case_result", r_data)
def test_cdf_against_r(self, r_case_result):
# Test large `v` values using R
q, k, v, r_res = r_case_result
res = stats.studentized_range.cdf(q, k, v)
assert_allclose(res, r_res)
@pytest.mark.slow
def test_moment_vectorization(self):
# Test moment broadcasting. Calls `_munp` directly because
# `rv_continuous.moment` is broken at time of writing. See gh-12192
m = stats.studentized_range._munp([1, 2], [4, 5], [10, 11])
assert_allclose(m.shape, (2,))
with pytest.raises(ValueError, match="...could not be broadcast..."):
stats.studentized_range._munp(1, [4, 5], [10, 11, 12])
@pytest.mark.xslow
def test_fitstart_valid(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
# the integration warning message may differ
sup.filter(IntegrationWarning)
k, df, _, _ = stats.studentized_range._fitstart([1, 2, 3])
assert_(stats.studentized_range._argcheck(k, df))
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# adjust to avoid nan with 0*log(0)
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles
# non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
with np.errstate(divide='ignore'):
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(~np.isnan(p).all())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
assert_(~np.isnan(p).all())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
with np.errstate(divide='ignore'):
params = np.array(stats.lognorm.fit(x, floc=0.))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
# Some tests for the variance and kurtosis of the Tukey Lambda distribution.
# See test_tukeylambda_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
# Known exact values:
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see https://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See https://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
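# The docstring of test_powerlaw_stats above derives the skewness from the raw
# moments E[X**k] = a / (a + k).  The helper below is a small illustrative
# sketch of that derivation (the name is ours and it is not called by the test
# suite); for a = 2.0 it reproduces, up to floating-point rounding, the value
# -0.56568542494924734 used in `cases` above.
def _powerlaw_skewness_from_raw_moments(a):
    # raw moments of the standard powerlaw distribution on [0, 1]
    m1, m2, m3 = a / (a + 1), a / (a + 2), a / (a + 3)
    sigma2 = m2 - m1**2
    # third central moment, mu_3 = E[X**3] - 3*mu*E[X**2] + 2*mu**3
    mu3 = m3 - 3 * m1 * m2 + 2 * m1**3
    return mu3 / sigma2**1.5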
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
def test_gengamma_endpoint_with_neg_c():
p = stats.gengamma.pdf(0, 1, -1)
assert p == 0.0
logp = stats.gengamma.logpdf(0, 1, -1)
assert logp == -np.inf
def test_gengamma_munp():
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The maximum number of subdivisions .50. has been "
"achieved.")
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
stats.ksone.fit(d)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
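# The derivative check in test_norm_logcdf above uses the complex-step trick:
# for an analytic f, Im(f(x + i*h)) / h approximates f'(x) to O(h**2) without
# subtractive cancellation.  A minimal sketch of the idea (illustrative only;
# the helper name is ours and it is not used by the tests):
def _complex_step_derivative(f, x, h=1e-10):
    # f must accept complex input, as stats.norm.logcdf does above
    return np.imag(f(x + 1j * h)) / h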
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_levy_sf():
# Large values, far into the tail of the distribution.
x = np.array([1e15, 1e25, 1e35, 1e50])
# Expected values were calculated with mpmath.
expected = np.array([2.5231325220201597e-08,
2.52313252202016e-13,
2.52313252202016e-18,
7.978845608028653e-26])
y = stats.levy.sf(x)
assert_allclose(y, expected, rtol=1e-14)
def test_levy_l_sf():
# Test levy_l.sf for small arguments.
x = np.array([-0.016, -0.01, -0.005, -0.0015])
# Expected values were calculated with mpmath.
expected = np.array([2.6644463892359302e-15,
1.523970604832107e-23,
2.0884875837625492e-45,
5.302850374626878e-147])
y = stats.levy_l.sf(x)
assert_allclose(y, expected, rtol=1e-13)
def test_levy_l_isf():
# Test roundtrip sf(isf(p)), including a small input value.
p = np.array([3.0e-15, 0.25, 0.99])
x = stats.levy_l.isf(p)
q = stats.levy_l.sf(x)
assert_allclose(q, p, rtol=5e-14)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this was working also before
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
np.random.seed(1234)
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
# ncx2.pdf does not return nans in extreme tails (example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
# Verify logpdf has extended precision when pdf underflows to 0
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
assert_equal(stats.ncx2.pdf(10000, 3, 12), 0)
assert_allclose(stats.ncx2.logpdf(10000, 3, 12), -4662.444377524883)
@pytest.mark.parametrize('method, expected', [
('cdf', np.array([2.497951336e-09, 3.437288941e-10])),
('pdf', np.array([1.238579980e-07, 1.710041145e-08])),
('logpdf', np.array([-15.90413011, -17.88416331])),
('ppf', np.array([4.865182052, 7.017182271]))
])
def test_ncx2_zero_nc(method, expected):
# gh-5441
# ncx2 with nc=0 is identical to chi2
# Comparison to R (v3.5.1)
# > options(digits=10)
# > pchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4))
# > dchisq(0.1, df=10, ncp=c(0,4), log=TRUE)
# > qchisq(0.1, df=10, ncp=c(0,4))
result = getattr(stats.ncx2, method)(0.1, nc=[0, 4], df=10)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_zero_nc_rvs():
# gh-5441
# ncx2 with nc=0 is identical to chi2
result = stats.ncx2.rvs(df=10, nc=0, random_state=1)
expected = stats.chi2.rvs(df=10, random_state=1)
assert_allclose(result, expected, atol=1e-15)
def test_ncx2_gh12731():
# test that gh-12731 is resolved; previously these were all 0.5
nc = 10**np.arange(5, 10)
assert_equal(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
def test_ncx2_gh8665():
# test that gh-8665 is resolved; previously this tended to nonzero value
x = np.array([4.99515382e+00, 1.07617327e+01, 2.31854502e+01,
4.99515382e+01, 1.07617327e+02, 2.31854502e+02,
4.99515382e+02, 1.07617327e+03, 2.31854502e+03,
4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
4.99515382e+04])
nu, lam = 20, 499.51538166556196
sf = stats.ncx2.sf(x, df=nu, nc=lam)
# Computed in R; no survival function implementation was found, so 1 - pchisq is used:
# options(digits=16)
# x <- c(4.99515382e+00, 1.07617327e+01, 2.31854502e+01, 4.99515382e+01,
# 1.07617327e+02, 2.31854502e+02, 4.99515382e+02, 1.07617327e+03,
# 2.31854502e+03, 4.99515382e+03, 1.07617327e+04, 2.31854502e+04,
# 4.99515382e+04)
# nu <- 20
# lam <- 499.51538166556196
# 1 - pchisq(x, df = nu, ncp = lam)
sf_expected = [1.0000000000000000, 1.0000000000000000, 1.0000000000000000,
1.0000000000000000, 1.0000000000000000, 0.9999999999999888,
0.6646525582135460, 0.0000000000000000, 0.0000000000000000,
0.0000000000000000, 0.0000000000000000, 0.0000000000000000,
0.0000000000000000]
assert_allclose(sf, sf_expected, atol=1e-12)
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
# inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
class TestSubclassingExplicitShapes:
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes:
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
assert_raises(TypeError, _distr3_gen, name='dummy')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped")
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
stats.genextreme.cdf(.5, 0)
stats.genextreme.pdf(.5, 0)
stats.genextreme.ppf(.5, 0)
stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
# regression test for gh-5181
euler_gamma = 0.5772156649015329
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
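# The expected values in test_genextreme_entropy above are all consistent with
# the closed form h(c, scale) = (1 - c)*euler_gamma + 1 + log(scale), i.e. the
# standard GEV entropy written with scipy's sign convention for the shape c.
# A minimal sketch of that formula (illustrative only, not used by the tests):
def _genextreme_entropy_closed_form(c, scale=1.0):
    euler_gamma = 0.5772156649015329
    return (1 - c) * euler_gamma + 1 + np.log(scale)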
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 100
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
def test_crystalball_function():
"""
All values are calculated using the independent implementation of the
ROOT framework (see https://root.cern.ch/).
Corresponding ROOT code is given in the comments.
"""
X = np.linspace(-5.0, 5.0, 21)[:-1]
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0)
expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645,
0.059618, 0.0811467, 0.116851, 0.18258, 0.265652,
0.301023, 0.265652, 0.18258, 0.097728, 0.0407391,
0.013226, 0.00334407, 0.000658486, 0.000100982,
1.20606e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0)
expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121,
0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752,
0.345928, 0.391987, 0.345928, 0.237752, 0.12726,
0.0530497, 0.0172227, 0.00435458, 0.000857469,
0.000131497, 1.57051e-05])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249,
0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944,
0.172964, 0.189964, 0.195994, 0.189964, 0.172964,
0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866,
0.0265249])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0)
expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258,
0.208663, 0.24344, 0.292128, 0.36516, 0.478254,
0.622723, 0.767192, 0.880286, 0.94959, 0.982834,
0.995314, 0.998981, 0.999824, 0.999976, 0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5)
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", ";
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0)
expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682,
0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323,
0.320592, 0.508717, 0.696841, 0.844111, 0.934357,
0.977646, 0.993899, 0.998674, 0.999771, 0.999969,
0.999997])
assert_allclose(expected, calculated, rtol=0.001)
# for(float x = -5.0; x < 5.0; x+=0.5) {
# std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5);
# std::cout << ", ";
# }
calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0)
expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945,
0.0830763, 0.121242, 0.173323, 0.24011, 0.320592,
0.411731, 0.508717, 0.605702, 0.696841, 0.777324,
0.844111, 0.896192, 0.934357, 0.960639, 0.977646])
assert_allclose(expected, calculated, rtol=0.001)
def test_crystalball_function_moments():
"""
All values are calculated using the pdf formula and the integrate function
of Mathematica
"""
# The last two (beta, m) pairs test the special case m == beta**2
beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0])
m = np.array([3.0, 3.0, 2.0, 4.0, 9.0])
# The distribution should be correctly normalised
expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0])
calculated_0th_moment = stats.crystalball._munp(0, beta, m)
assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001)
# calculated using wolframalpha.com
# e.g. for beta = 2 and m = 3 we calculate the norm like this:
# integrate exp(-x^2/2) from -2 to infinity +
# integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2
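# As a rough numerical cross-check of the first norm value (illustrative
# sketch, not part of the original test; assumes scipy.integrate is available):
# from scipy.integrate import quad
# tail = quad(lambda t: (3/2)**3*np.exp(-2)*(3/2 - 2 - t)**(-3), -np.inf, -2)[0]
# core = quad(lambda t: np.exp(-t**2/2), -2, np.inf)[0]
# tail + core  # ~ 2.5511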
norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455])
a = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174])
expected_1th_moment = a / norm
calculated_1th_moment = stats.crystalball._munp(1, beta, m)
assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908])
expected_2th_moment = a / norm
calculated_2th_moment = stats.crystalball._munp(2, beta, m)
assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668])
expected_3th_moment = a / norm
calculated_3th_moment = stats.crystalball._munp(3, beta, m)
assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468])
expected_4th_moment = a / norm
calculated_4th_moment = stats.crystalball._munp(4, beta, m)
assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001)
a = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086])
expected_5th_moment = a / norm
calculated_5th_moment = stats.crystalball._munp(5, beta, m)
assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001)
def test_crystalball_entropy():
# regression test for gh-13602
cb = stats.crystalball(2, 3)
res1 = cb.entropy()
# -20000 and 30 effectively stand in for negative and positive infinity
lo, hi, N = -20000, 30, 200000
x = np.linspace(lo, hi, N)
res2 = trapezoid(entr(cb.pdf(x)), x)
assert_allclose(res1, res2, rtol=1e-7)
def test_invweibull():
"""
Test fitting invweibull to data.
Here is the same calculation in R:
> library(evd)
> library(fitdistrplus)
> x = c(1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99)
> result = fitdist(x, 'frechet', control=list(reltol=1e-13),
+ fix.arg=list(loc=0), start=list(shape=2, scale=3))
> result
Fitting of the distribution ' frechet ' by maximum likelihood
Parameters:
estimate Std. Error
shape 1.048482 0.2261815
scale 3.099456 0.8292887
Fixed parameters:
value
loc 0
"""
def optimizer(func, x0, args=(), disp=0):
return fmin(func, x0, args=args, disp=disp, xtol=1e-12, ftol=1e-12)
x = np.array([1, 1.25, 2, 2.5, 2.8, 3, 3.8, 4, 5, 8, 10, 12, 64, 99])
c, loc, scale = stats.invweibull.fit(x, floc=0, optimizer=optimizer)
assert_allclose(c, 1.048482, rtol=5e-6)
assert loc == 0
assert_allclose(scale, 3.099456, rtol=5e-6)
@pytest.mark.parametrize(
'df1,df2,x',
[(2, 2, [-0.5, 0.2, 1.0, 2.3]),
(4, 11, [-0.5, 0.2, 1.0, 2.3]),
(7, 17, [1, 2, 3, 4, 5])]
)
def test_ncf_edge_case(df1, df2, x):
# Test for edge case described in gh-11660.
# Non-central Fisher distribution when nc = 0
# should be the same as Fisher distribution.
nc = 0
expected_cdf = stats.f.cdf(x, df1, df2)
calculated_cdf = stats.ncf.cdf(x, df1, df2, nc)
assert_allclose(expected_cdf, calculated_cdf, rtol=1e-14)
# when ncf_gen._skip_pdf will be used instead of generic pdf,
# this additional test will be useful.
expected_pdf = stats.f.pdf(x, df1, df2)
calculated_pdf = stats.ncf.pdf(x, df1, df2, nc)
assert_allclose(expected_pdf, calculated_pdf, rtol=1e-6)
def test_ncf_variance():
# Regression test for gh-10658 (incorrect variance formula for ncf).
# The correct value of ncf.var(2, 6, 4), 42.75, can be verified with, for
# example, Wolfram Alpha with the expression
# Variance[NoncentralFRatioDistribution[2, 6, 4]]
# or with the implementation of the noncentral F distribution in the C++
# library Boost.
v = stats.ncf.var(2, 6, 4)
assert_allclose(v, 42.75, rtol=1e-14)
class TestHistogram:
def setup_method(self):
np.random.seed(1234)
# We have 8 bins
# [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
# But actually np.histogram will put the last 9 also in the [8,9) bin!
# Therefore there is a slight difference below for the last bin, from
# what you might have expected.
histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
self.template = stats.rv_histogram(histogram)
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
norm_histogram = np.histogram(data, bins=50)
self.norm_template = stats.rv_histogram(norm_histogram)
def test_pdf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
assert_allclose(self.template.pdf(values), pdf_values)
# Test explicitly the corner cases:
# As stated above the pdf in the bin [8,9) is greater than
# one would naively expect because np.histogram put the 9
# into the [8,9) bin.
assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
# 9 is outside our defined bins [8,9) hence the pdf is already 0
# for a continuous distribution this is fine, because a single value
# does not have a finite probability!
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.pdf(x),
stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_cdf_ppf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
assert_allclose(self.template.cdf(values), cdf_values)
# First three and last two values in cdf_value are not unique
assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
# Test of cdf and ppf are inverse functions
x = np.linspace(1.0, 9.0, 100)
assert_allclose(self.template.ppf(self.template.cdf(x)), x)
x = np.linspace(0.0, 1.0, 100)
assert_allclose(self.template.cdf(self.template.ppf(x)), x)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.cdf(x),
stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_rvs(self):
N = 10000
sample = self.template.rvs(size=N, random_state=123)
assert_equal(np.sum(sample < 1.0), 0.0)
assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_equal(np.sum(sample > 9.0), 0.0)
def test_munp(self):
for n in range(4):
assert_allclose(self.norm_template._munp(n),
stats.norm(1.0, 2.5).moment(n), rtol=0.05)
def test_entropy(self):
assert_allclose(self.norm_template.entropy(),
stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
def test_loguniform():
# This test makes sure the alias of "loguniform" is log-uniform
rv = stats.loguniform(10 ** -3, 10 ** 0)
rvs = rv.rvs(size=10000, random_state=42)
vals, _ = np.histogram(np.log10(rvs), bins=10)
assert 900 <= vals.min() <= vals.max() <= 1100
assert np.abs(np.median(vals) - 1000) <= 10
class TestArgus:
def test_argus_rvs_large_chi(self):
# test that the algorithm can handle large values of chi
x = stats.argus.rvs(50, size=500, random_state=325)
assert_almost_equal(stats.argus(50).mean(), x.mean(), decimal=4)
@pytest.mark.parametrize('chi, random_state', [
[0.1, 325], # chi <= 0.5: rejection method case 1
[1.3, 155], # 0.5 < chi <= 1.8: rejection method case 2
[3.5, 135] # chi > 1.8: transform conditional Gamma distribution
])
def test_rvs(self, chi, random_state):
x = stats.argus.rvs(chi, size=500, random_state=random_state)
_, p = stats.kstest(x, "argus", (chi, ))
assert_(p > 0.05)
@pytest.mark.parametrize('chi', [1e-9, 1e-6])
def test_rvs_small_chi(self, chi):
# test for gh-11699 => rejection method case 1 can even handle chi=0
# the CDF of the distribution for chi=0 is 1 - (1 - x**2)**(3/2)
# test rvs against distribution of limit chi=0
r = stats.argus.rvs(chi, size=500, random_state=890981)
_, p = stats.kstest(r, lambda x: 1 - (1 - x**2)**(3/2))
assert_(p > 0.05)
# Expected values were computed with mpmath.
@pytest.mark.parametrize('chi, expected_mean',
[(1, 0.6187026683551835),
(10, 0.984805536783744),
(40, 0.9990617659702923),
(60, 0.9995831885165300),
(99, 0.9998469348663028)])
def test_mean(self, chi, expected_mean):
m = stats.argus.mean(chi, scale=1)
assert_allclose(m, expected_mean, rtol=1e-13)
# Expected values were computed with mpmath.
@pytest.mark.parametrize('chi, expected_var, rtol',
[(1, 0.05215651254197807, 1e-13),
(10, 0.00015805472008165595, 1e-11),
(40, 5.877763210262901e-07, 1e-8),
(60, 1.1590179389611416e-07, 1e-8),
(99, 1.5623277006064666e-08, 1e-8)])
def test_var(self, chi, expected_var, rtol):
v = stats.argus.var(chi, scale=1)
assert_allclose(v, expected_var, rtol=rtol)
# Expected values were computed with mpmath (code: see gh-13370).
@pytest.mark.parametrize('chi, expected, rtol',
[(0.9, 0.07646314974436118, 1e-14),
(0.5, 0.015429797891863365, 1e-14),
(0.1, 0.0001325825293278049, 1e-14),
(0.01, 1.3297677078224565e-07, 1e-15),
(1e-3, 1.3298072023958999e-10, 1e-14),
(1e-4, 1.3298075973486862e-13, 1e-14),
(1e-6, 1.32980760133771e-19, 1e-14),
(1e-9, 1.329807601338109e-28, 1e-15)])
def test_argus_phi_small_chi(self, chi, expected, rtol):
assert_allclose(_argus_phi(chi), expected, rtol=rtol)
# Expected values were computed with mpmath (code: see gh-13370).
@pytest.mark.parametrize(
'chi, expected',
[(0.5, (0.28414073302940573, 1.2742227939992954, 1.2381254688255896)),
(0.2, (0.296172952995264, 1.2951290588110516, 1.1865767100877576)),
(0.1, (0.29791447523536274, 1.29806307956989, 1.1793168289857412)),
(0.01, (0.2984904104866452, 1.2990283628160553, 1.1769268414080531)),
(1e-3, (0.298496172925224, 1.2990380082487925, 1.176902956021053)),
(1e-4, (0.29849623054991836, 1.2990381047023793, 1.1769027171686324)),
(1e-6, (0.2984962311319278, 1.2990381056765605, 1.1769027147562232)),
(1e-9, (0.298496231131986, 1.299038105676658, 1.1769027147559818))])
def test_pdf_small_chi(self, chi, expected):
x = np.array([0.1, 0.5, 0.9])
assert_allclose(stats.argus.pdf(x, chi), expected, rtol=1e-13)
# Expected values were computed with mpmath (code: see gh-13370).
@pytest.mark.parametrize(
'chi, expected',
[(0.5, (0.9857660526895221, 0.6616565930168475, 0.08796070398429937)),
(0.2, (0.9851555052359501, 0.6514666238985464, 0.08362690023746594)),
(0.1, (0.9850670974995661, 0.6500061310508574, 0.08302050640683846)),
(0.01, (0.9850378582451867, 0.6495239242251358, 0.08282109244852445)),
(1e-3, (0.9850375656906663, 0.6495191015522573, 0.08281910005231098)),
(1e-4, (0.9850375627651049, 0.6495190533254682, 0.08281908012852317)),
(1e-6, (0.9850375627355568, 0.6495190528383777, 0.08281907992729293)),
(1e-9, (0.9850375627355538, 0.649519052838329, 0.0828190799272728))])
def test_sf_small_chi(self, chi, expected):
x = np.array([0.1, 0.5, 0.9])
assert_allclose(stats.argus.sf(x, chi), expected, rtol=1e-14)
# Expected values were computed with mpmath (code: see gh-13370).
@pytest.mark.parametrize(
'chi, expected',
[(0.5, (0.0142339473104779, 0.3383434069831524, 0.9120392960157007)),
(0.2, (0.014844494764049919, 0.34853337610145363, 0.916373099762534)),
(0.1, (0.014932902500433911, 0.34999386894914264, 0.9169794935931616)),
(0.01, (0.014962141754813293, 0.35047607577486417, 0.9171789075514756)),
(1e-3, (0.01496243430933372, 0.35048089844774266, 0.917180899947689)),
(1e-4, (0.014962437234895118, 0.3504809466745317, 0.9171809198714769)),
(1e-6, (0.01496243726444329, 0.3504809471616223, 0.9171809200727071)),
(1e-9, (0.014962437264446245, 0.350480947161671, 0.9171809200727272))])
def test_cdf_small_chi(self, chi, expected):
x = np.array([0.1, 0.5, 0.9])
assert_allclose(stats.argus.cdf(x, chi), expected, rtol=1e-12)
# Expected values were computed with mpmath (code: see gh-13370).
@pytest.mark.parametrize(
'chi, expected, rtol',
[(0.5, (0.5964284712757741, 0.052890651988588604), 1e-12),
(0.101, (0.5893490968089076, 0.053017469847275685), 1e-11),
(0.1, (0.5893431757009437, 0.05301755449499372), 1e-13),
(0.01, (0.5890515677940915, 0.05302167905837031), 1e-13),
(1e-3, (0.5890486520005177, 0.053021719862088104), 1e-13),
(1e-4, (0.5890486228426105, 0.0530217202700811), 1e-13),
(1e-6, (0.5890486225481156, 0.05302172027420182), 1e-13),
(1e-9, (0.5890486225480862, 0.05302172027420224), 1e-13)])
def test_stats_small_chi(self, chi, expected, rtol):
val = stats.argus.stats(chi, moments='mv')
assert_allclose(val, expected, rtol=rtol)
class TestNakagami:
def test_logpdf(self):
# Test nakagami logpdf for an input where the PDF is smaller
# than can be represented with 64 bit floating point.
# The expected value of logpdf was computed with mpmath:
#
# def logpdf(x, nu):
# x = mpmath.mpf(x)
# nu = mpmath.mpf(nu)
# return (mpmath.log(2) + nu*mpmath.log(nu) -
# mpmath.loggamma(nu) + (2*nu - 1)*mpmath.log(x) -
# nu*x**2)
#
nu = 2.5
x = 25
logp = stats.nakagami.logpdf(x, nu)
assert_allclose(logp, -1546.9253055607549)
def test_sf_isf(self):
# Test nakagami sf and isf when the survival function
# value is very small.
# The expected value of the survival function was computed
# with mpmath:
#
# def sf(x, nu):
# x = mpmath.mpf(x)
# nu = mpmath.mpf(nu)
# return mpmath.gammainc(nu, nu*x*x, regularized=True)
#
nu = 2.5
x0 = 5.0
sf = stats.nakagami.sf(x0, nu)
assert_allclose(sf, 2.736273158588307e-25, rtol=1e-13)
# Check round trip back to x0.
x1 = stats.nakagami.isf(sf, nu)
assert_allclose(x1, x0, rtol=1e-13)
@pytest.mark.xfail(reason="Fit of nakagami not reliable, see gh-10908.")
@pytest.mark.parametrize('nu', [1.6, 2.5, 3.9])
@pytest.mark.parametrize('loc', [25.0, 10, 35])
@pytest.mark.parametrize('scale', [13, 5, 20])
def test_fit(self, nu, loc, scale):
# Regression test for gh-13396 (21/27 cases failed previously)
# The first tuple of the parameters' values is discussed in gh-10908
N = 100
samples = stats.nakagami.rvs(size=N, nu=nu, loc=loc,
scale=scale, random_state=1337)
nu_est, loc_est, scale_est = stats.nakagami.fit(samples)
assert_allclose(nu_est, nu, rtol=0.2)
assert_allclose(loc_est, loc, rtol=0.2)
assert_allclose(scale_est, scale, rtol=0.2)
def dlogl_dloc(nu, loc, scale):
return ((-2*nu + 1) * np.sum(1/(samples - loc))
+ 2*nu/scale**2 * np.sum(samples - loc))
def dlogl_dnu(nu, loc, scale):
return (N * (1 + np.log(nu) - polygamma(0, nu)) +
2 * np.sum(np.log((samples - loc) / scale))
- np.sum(((samples - loc) / scale)**2))
def dlogl_dscale(nu, loc, scale):
return (- 2 * N * nu / scale
+ 2 * nu / scale ** 3 * np.sum((samples - loc) ** 2))
assert_allclose(dlogl_dnu(nu_est, loc_est, scale_est), 0, atol=1e-3)
assert_allclose(dlogl_dloc(nu_est, loc_est, scale_est), 0, atol=1e-3)
assert_allclose(dlogl_dscale(nu_est, loc_est, scale_est), 0, atol=1e-3)
@pytest.mark.parametrize('loc', [25.0, 10, 35])
@pytest.mark.parametrize('scale', [13, 5, 20])
def test_fit_nu(self, loc, scale):
# For nu = 0.5, we have analytical values for
# the MLE of the loc and the scale
nu = 0.5
n = 100
samples = stats.nakagami.rvs(size=n, nu=nu, loc=loc,
scale=scale, random_state=1337)
nu_est, loc_est, scale_est = stats.nakagami.fit(samples, f0=nu)
# Analytical values
loc_theo = np.min(samples)
scale_theo = np.sqrt(np.mean((samples - loc_est) ** 2))
assert_allclose(nu_est, nu, rtol=1e-7)
assert_allclose(loc_est, loc_theo, rtol=1e-7)
assert_allclose(scale_est, scale_theo, rtol=1e-7)
class TestWrapCauchy:
def test_cdf_shape_broadcasting(self):
# Regression test for gh-13791.
# Check that wrapcauchy.cdf broadcasts the shape parameter
# correctly.
c = np.array([[0.03, 0.25], [0.5, 0.75]])
x = np.array([[1.0], [4.0]])
p = stats.wrapcauchy.cdf(x, c)
assert p.shape == (2, 2)
scalar_values = [stats.wrapcauchy.cdf(x1, c1)
for (x1, c1) in np.nditer((x, c))]
assert_allclose(p.ravel(), scalar_values, rtol=1e-13)
def test_cdf_center(self):
p = stats.wrapcauchy.cdf(np.pi, 0.03)
assert_allclose(p, 0.5, rtol=1e-14)
def test_cdf(self):
x1 = 1.0 # less than pi
x2 = 4.0 # greater than pi
c = 0.75
p = stats.wrapcauchy.cdf([x1, x2], c)
cr = (1 + c)/(1 - c)
assert_allclose(p[0], np.arctan(cr*np.tan(x1/2))/np.pi)
assert_allclose(p[1], 1 - np.arctan(cr*np.tan(np.pi - x2/2))/np.pi)
def test_rvs_no_size_warning():
class rvs_no_size_gen(stats.rv_continuous):
def _rvs(self):
return 1
rvs_no_size = rvs_no_size_gen(name='rvs_no_size')
with assert_warns(np.VisibleDeprecationWarning):
rvs_no_size.rvs()
@pytest.mark.parametrize('distname, args', invdistdiscrete + invdistcont)
def test_support_gh13294_regression(distname, args):
if distname in skip_test_support_gh13294_regression:
pytest.skip(f"skipping test for the support method for "
f"distribution {distname}.")
dist = getattr(stats, distname)
# test support method with invalid arguments
if isinstance(dist, stats.rv_continuous):
# test with valid scale
if len(args) != 0:
a0, b0 = dist.support(*args)
assert_equal(a0, np.nan)
assert_equal(b0, np.nan)
# test with invalid scale
# For some distributions that take no parameters, only the
# invalid-scale case occurs, so it is implicitly tested here.
loc1, scale1 = 0, -1
a1, b1 = dist.support(*args, loc1, scale1)
assert_equal(a1, np.nan)
assert_equal(b1, np.nan)
else:
a, b = dist.support(*args)
assert_equal(a, np.nan)
assert_equal(b, np.nan)
def test_support_broadcasting_gh13294_regression():
a0, b0 = stats.norm.support([0, 0, 0, 1], [1, 1, 1, -1])
ex_a0 = np.array([-np.inf, -np.inf, -np.inf, np.nan])
ex_b0 = np.array([np.inf, np.inf, np.inf, np.nan])
assert_equal(a0, ex_a0)
assert_equal(b0, ex_b0)
assert a0.shape == ex_a0.shape
assert b0.shape == ex_b0.shape
a1, b1 = stats.norm.support([], [])
ex_a1, ex_b1 = np.array([]), np.array([])
assert_equal(a1, ex_a1)
assert_equal(b1, ex_b1)
assert a1.shape == ex_a1.shape
assert b1.shape == ex_b1.shape
a2, b2 = stats.norm.support([0, 0, 0, 1], [-1])
ex_a2 = np.array(4*[np.nan])
ex_b2 = np.array(4*[np.nan])
assert_equal(a2, ex_a2)
assert_equal(b2, ex_b2)
assert a2.shape == ex_a2.shape
assert b2.shape == ex_b2.shape
def test_stats_broadcasting_gh14953_regression():
# test case in gh14953
loc = [0., 0.]
scale = [[1.], [2.], [3.]]
assert_equal(stats.norm.var(loc, scale), [[1., 1.], [4., 4.], [9., 9.]])
# test some edge cases
loc = np.empty((0, ))
scale = np.empty((1, 0))
assert stats.norm.var(loc, scale).shape == (1, 0)
# Check a few values of the cosine distribution's cdf, sf, ppf and
# isf methods. Expected values were computed with mpmath.
@pytest.mark.parametrize('x, expected',
[(-3.14159, 4.956444476505336e-19),
(3.14, 0.9999999998928399)])
def test_cosine_cdf_sf(x, expected):
assert_allclose(stats.cosine.cdf(x), expected)
assert_allclose(stats.cosine.sf(-x), expected)
@pytest.mark.parametrize('p, expected',
[(1e-6, -3.1080612413765905),
(1e-17, -3.141585429601399),
(0.975, 2.1447547020964923)])
def test_cosine_ppf_isf(p, expected):
assert_allclose(stats.cosine.ppf(p), expected)
assert_allclose(stats.cosine.isf(p), -expected)
def test_cosine_logpdf_endpoints():
logp = stats.cosine.logpdf([-np.pi, np.pi])
assert_equal(logp, [-np.inf, -np.inf])
def test_distr_params_lists():
# distribution objects are extra distributions added in
# test_discrete_basic. All other distributions are strings (names)
# and so we only choose those to compare whether both lists match.
discrete_distnames = {name for name, _ in distdiscrete
if isinstance(name, str)}
invdiscrete_distnames = {name for name, _ in invdistdiscrete}
assert discrete_distnames == invdiscrete_distnames
cont_distnames = {name for name, _ in distcont}
invcont_distnames = {name for name, _ in invdistcont}
assert cont_distnames == invcont_distnames
|
zerothi/scipy
|
scipy/stats/tests/test_distributions.py
|
Python
|
bsd-3-clause
| 261,929
|
[
"Gaussian"
] |
9ee666aff1a6c73b18d1f6dfc55e88a94047d9583d1a5dfa177d3f22cc33b8a1
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=R0914,R0912,R0913
# Utility methods for Fullprof
from __future__ import (absolute_import, division, print_function)
import os
import math
def calculate_intensity_difference(reflection_dict1, reflection_dict2):
"""
Calculate the difference of the intensities on the same reflections between 2 sets of measurements
:param reflection_dict1: dict with key = (h, k, l) and value = (intensity, uncertainty)
:param reflection_dict2: dict with key = (h, k, l) and value = (intensity, uncertainty)
:return: dict with key = (h, k, l) and value = (intensity difference, combined uncertainty)
"""
# check validity
assert isinstance(reflection_dict1, dict), 'Input 1 must be a dictionary'
assert isinstance(reflection_dict2, dict), 'Input 2 must be a dictionary'
# get a list of HKL
hkl_list = sorted(reflection_dict1.keys())
# output
out_dict = dict()
for hkl in hkl_list:
# skip if the HKL does not exist in both sets
if hkl not in reflection_dict2:
continue
intensity_1, var_1 = reflection_dict1[hkl]
intensity_2, var_2 = reflection_dict2[hkl]
diff_intensity = intensity_1 - intensity_2
diff_var = math.sqrt(var_1**2 + var_2**2)
out_dict[hkl] = (diff_intensity, diff_var)
# END-FOR
return out_dict
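# Illustrative usage (hypothetical reflection dictionaries; each value is an
# (intensity, uncertainty) pair):
# set_a = {(1, 0, 0): (120.0, 3.5), (2, 0, 0): (45.0, 2.1)}
# set_b = {(1, 0, 0): (118.5, 3.2)}
# calculate_intensity_difference(set_a, set_b)
# # -> {(1, 0, 0): (1.5, math.sqrt(3.5**2 + 3.2**2))}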
def load_scd_fullprof_intensity_file(file_name):
"""
load a single crystal diffraction Fullprof intensity file
:param file_name:
:return: 2-tuple. dictionary for reflection (key = hkl, value = (intensity, error)); string as error message
"""
# check validity
assert isinstance(file_name, str), 'Fullprof SCD intensity file %s must be a string but not of type %s.' \
'' % (str(file_name), type(file_name))
assert os.path.exists(file_name), 'Fullprof SCD intensity file %s cannot be found.' % file_name
# open file
scd_int_file = open(file_name, 'r')
raw_lines = scd_int_file.readlines()
scd_int_file.close()
# parse file
wave_length = 0.
num_k_vector = 0
k_index = 0
error_buffer = ''
reflection_dict = dict() # key: 3-tuple as (h, k, l)
for line_index, raw_line in enumerate(raw_lines):
# clean the line
line = raw_line.strip()
if len(line) == 0:
continue
if line_index == 0:
# line 1 as header
pass
elif line.startswith('('):
# line 2 format line, skip
continue
elif line.endswith('0 0'):
# line 3 as wave length line
wave_length = float(line.split()[0])
elif k_index < num_k_vector:
# k-vector line: (num_k_vector) line right after k-indication line
k_index += 1
else:
# split
terms = line.split()
if len(terms) == 1:
# k-vector
num_k_vector = int(terms[0])
continue
# line that cannot be parsed
if len(terms) < 5:
# some line may have problem. print out and warning
error_buffer += 'unable to parse line %-3d: %s\n' % (line_index, line)
continue
try:
lattice_h = int(terms[0])
lattice_k = int(terms[1])
lattice_l = int(terms[2])
intensity = float(terms[3])
variation = float(terms[4])
except ValueError:
error_buffer += 'unable to parse line %-3d: %s\n' % (line_index, line)
else:
reflection_dict[(lattice_h, lattice_k, lattice_l)] = (intensity, variation)
# END-IF-ELSE
# END-FOR
return reflection_dict, wave_length, error_buffer
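# Sketch of the file layout the parser above expects (all values are made up;
# compare with write_scd_fullprof_kvector below, which produces this layout):
# COMM user header              <- line 1: header
# (3i4,2f8.2,i4)                <- format line, starts with '(' and is skipped
# 1.5400 0 0                    <- wavelength line, ends with '0 0'
# 1                             <- optional: number of k-vectors
# 1 0.500 0.000 0.000           <- k-vector line(s)
# 1 0 0 120.00 3.50             <- reflection line: h k l intensity sigma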
def convert_to_peak_dict_list(refection_dict):
"""
Convert a reflection dictionary to a list of peak dictionaries for writing out in Fullprof format
:param refection_dict: dict with key = (h, k, l) and value = (intensity, sigma)
:return: list of peak dictionaries with keys 'hkl', 'kindex', 'intensity' and 'sigma'
"""
# check validity
assert isinstance(refection_dict, dict)
peak_dict_list = list()
# loop around all HKL
for hkl in sorted(refection_dict.keys()):
intensity, sigma = refection_dict[hkl]
peak_dict = {'hkl': hkl,
'kindex': 0,
'intensity': intensity,
'sigma': sigma}
peak_dict_list.append(peak_dict)
return peak_dict_list
def write_scd_fullprof_kvector(user_header, wave_length, k_vector_dict, peak_dict_list, fp_file_name,
with_absorption, high_precision):
"""
Purpose: Export integrated peaks to single crystal diffraction Fullprof file
Requirements:
Guarantees: a Fullprof file is written
Note:
1. peak parameter dictionary: keys are 'hkl', 'kindex', 'intensity', and 'sigma'
:param user_header: user defined header (information)
:param wave_length: wavelength
:param k_vector_dict: dictionary of k-vectors with key = k-index and value = (kx, ky, kz)
:param peak_dict_list: a list of peak parameters stored in dictionaries.
:param fp_file_name: name of the output Fullprof file
:param with_absorption: flag to append absorption (up/us) columns to each peak line
:param high_precision: flag to write intensity and sigma with the higher-precision format (2f18.5)
:return: the text buffer that was written to the file
"""
# check input validity
assert isinstance(user_header, str), 'User header must be a string.'
assert isinstance(wave_length, float), 'Neutron wave length must be a float.'
assert isinstance(k_vector_dict, dict), 'K-vector list must be a dictionary.'
assert isinstance(peak_dict_list, list), 'Peak-dictionary list must be a list.'
assert isinstance(high_precision, bool), 'high_precision flag must be a boolean.'
# determine whether the output is for magnetic peaks or nuclear peaks
# assuming that either all peaks are magnetic or all peaks are nuclear
num_k_vectors = len(k_vector_dict)
peak_0_dict = peak_dict_list[0]
assert isinstance(peak_0_dict, dict) and 'kindex' in peak_0_dict, 'Peak must be a dict containing key "kindex".'
peak0_is_magnetic = peak_0_dict['kindex'] > 0 and num_k_vectors > 0
# set up fullprof .ini file buffer
fp_buffer = ''
# user defined header
header = 'COMM %s' % user_header.strip()
# fixed file format line: only the magnetic case uses 4i4,2f8,...
if peak0_is_magnetic:
first_3_terms_format = '4i4'
else:
first_3_terms_format = '3i4'
if with_absorption:
file_format = '(%s,2f8.2,i4,6f8.5)' % first_3_terms_format
elif high_precision:
# precisions: (3i4,2f18.5,i4)
file_format = '({0},2f18.5,i4)'.format(first_3_terms_format)
else:
# precisions: (3i4,2f8.2,i4)
file_format = '(%s,2f8.2,i4)' % first_3_terms_format
# END-IF
# wave length
lambda_line = '%.4f 0 0' % wave_length
fp_buffer += header + '\n' + file_format + '\n' + lambda_line + '\n'
# tricky one. number of K vectors
if num_k_vectors > 0:
# number of k vectors
kline = '%d' % num_k_vectors
for k_index in k_vector_dict.keys():
k_vector = k_vector_dict[k_index]
# write k_x, k_y, k_z
kline += '\n%d %.3f %.3f %.3f' % (k_index, k_vector[0], k_vector[1], k_vector[2])
fp_buffer += kline + '\n'
# END-IF
# format of the intensity/sigma/flag part of each peak line
if high_precision:
# '%18.5f%18.5f%4d'
part3_format = '{0:18.5f}{1:18.5f}{2:4d}'
else:
part3_format = '%8.2f%8.2f%4d'
# peak intensities
for i_peak, peak_dict in enumerate(peak_dict_list):
# check
assert isinstance(peak_dict, dict), '%d-th peak must be a dictionary but not %s.' % (i_peak,
str(type(peak_dict)))
for key in ['hkl', 'kindex', 'intensity', 'sigma']:
assert key in peak_dict, '%d-th peak dictionary does not have required key %s.' % (i_peak, key)
# check whether it is magnetic
if num_k_vectors > 0 and peak_dict['kindex'] > 0:
is_magnetic = True
else:
is_magnetic = False
# miller index
m_h, m_k, m_l = peak_dict['hkl']
if is_magnetic:
k_index = peak_dict['kindex']
k_x, k_y, k_z = k_vector_dict[k_index]
else:
k_x = k_y = k_z = 0.0
k_index = 0
# remove the magnetic k-shift vector from HKL
part1 = '%4d%4d%4d' % (nearest_int(m_h-k_x), nearest_int(m_k-k_y), nearest_int(m_l-k_z))
# k index
if is_magnetic:
part2 = '%4d' % k_index
else:
part2 = ''
# END-IF-ELSE
# peak intensity and sigma
try:
if high_precision:
part3 = part3_format.format(peak_dict['intensity'], peak_dict['sigma'], 1)
else:
part3 = '%8.2f%8.2f%4d' % (peak_dict['intensity'], peak_dict['sigma'], 1)
except TypeError as type_err:
raise RuntimeError('In writing Fullprof file, unable to convert intensity {0} and/or sigma {1} to '
'floats. FYI: {2}'.format(peak_dict['intensity'], peak_dict['sigma'], type_err))
peak_line = part1 + part2 + part3
# absorption
if 'up' in peak_dict:
part4 = ''
for i in range(3):
part4 += '%8.5f%8.5f' % (peak_dict['up'][i], peak_dict['us'][i])
peak_line += part4
fp_buffer += peak_line + '\n'
# END-FOR (peak_dict)
# write to file
try:
ofile = open(fp_file_name, 'w')
ofile.write(fp_buffer)
ofile.close()
except IOError as io_err:
err_msg = 'Unable to write to Fullprof single crystal file at %s due to %s..' % (fp_file_name, str(io_err))
raise RuntimeError(err_msg)
return fp_buffer
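# Illustrative call of the writer above (all values are made up):
# peaks = [{'hkl': (1, 0, 0), 'kindex': 0, 'intensity': 120.0, 'sigma': 3.5}]
# write_scd_fullprof_kvector('test sample', 1.54, {}, peaks, 'peaks.int',
#                            with_absorption=False, high_precision=False)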
def nearest_int(number):
"""
"""
if number > 0:
answer = int(number + 0.5)
else:
answer = int(number - 0.5)
return answer
|
mganeva/mantid
|
scripts/HFIR_4Circle_Reduction/fputility.py
|
Python
|
gpl-3.0
| 9,894
|
[
"CRYSTAL"
] |
193f5b94db91604ffc6935a60d930ba61a6f10ebf8b353329da3f31b83f18c2a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for processing Lammps output files:
1. log file: contains the thermodynamic data with the format set by the
'thermo_style' command
2. trajectory file (dump file): the file generated by the 'dump' command
Restrictions:
The first 2 fields of the ATOMS section in the trajectory(dump) file
must be the atom id and the atom type. There can be arbitrary number
of fields after that and they all will be treated as floats and
updated based on the field names in the ITEM: ATOMS line.
"""
import re
from io import open
import numpy as np
from pymatgen.core.periodic_table import _pt_data
from pymatgen.core.structure import Molecule
from pymatgen.core.lattice import Lattice
from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer
from pymatgen.io.lammps.data import LammpsData, LammpsForceFieldData
__author__ = "Kiran Mathew"
__email__ = "[email protected]"
__credits__ = "Navnidhi Rajput, Michael Humbert"
class LammpsRun(object):
"""
Parse the lammps data file, trajectory file and the log file to extract
useful info about the system.
Args:
data_file (str): path to the data file
trajectory_file (str): path to the trajectory file
log_file (str): path to the log file
"""
def __init__(self, data_file, trajectory_file, log_file="log.lammps",
is_forcefield=False):
self.data_file = data_file
self.trajectory_file = trajectory_file
self.log_file = log_file
self.lammps_log = LammpsLog(log_file)
if is_forcefield:
self.lammps_data = LammpsForceFieldData.from_file(data_file)
else:
self.lammps_data = LammpsData.from_file(data_file)
self._set_mol_masses_and_charges()
self._parse_trajectory()
def _parse_trajectory(self):
"""
parse the trajectory file.
"""
traj_timesteps = []
trajectory = []
timestep_label = "ITEM: TIMESTEP"
# "ITEM: ATOMS id type ...
traj_label_pattern = re.compile(
"^\s*ITEM:\s+ATOMS\s+id\s+type\s+([A-Za-z\s]*)")
# default: id type x y z vx vy vz mol"
# updated below based on the field names in the ITEM: ATOMS line
# Note: the first 2 fields must be the id and the atom type. There can be
# arbitrary number of fields after that and they all will be treated as floats.
traj_pattern = re.compile(
"\s*(\d+)\s+(\d+)\s+([0-9eE\.+-]+)\s+([0-9eE\.+-]+)\s+"
"([0-9eE\.+-]+)\s+"
"([0-9eE\.+-]+)\s+"
"([0-9eE\.+-]+)\s+([0-9eE\.+-]+)\s+(\d+)\s*")
parse_timestep = False
with open(self.trajectory_file) as tf:
for line in tf:
if timestep_label in line:
parse_timestep = True
continue
if parse_timestep:
traj_timesteps.append(float(line))
parse_timestep = False
if traj_label_pattern.search(line):
fields = traj_label_pattern.search(line).group(1)
fields = fields.split()
# example:- id type x y z vx vy vz mol ...
traj_pattern_string = "\s*(\d+)\s+(\d+)" + "".join(
["\s+([0-9eE\.+-]+)" for _ in range(len(fields))])
traj_pattern = re.compile(traj_pattern_string)
if traj_pattern.search(line):
# first 2 fields must be id and type, the rest of them
# will be casted as floats
m = traj_pattern.search(line)
line_data = []
line_data.append(int(m.group(1)) - 1)
line_data.append(int(m.group(2)))
line_data.extend(
[float(x) for i, x in enumerate(m.groups()) if i + 1 > 2])
trajectory.append(tuple(line_data))
traj_dtype = np.dtype([(str('Atoms_id'), np.int64),
(str('atom_type'), np.int64)] +
[(str(fld), np.float64) for fld in fields])
self.trajectory = np.array(trajectory, dtype=traj_dtype)
self.timesteps = np.array(traj_timesteps, dtype=np.float64)
for step in range(self.timesteps.size):
begin = step * self.natoms
end = (step + 1) * self.natoms
self.trajectory[begin:end] = np.sort(self.trajectory[begin:end],
order=str("Atoms_id"))
def _set_mol_masses_and_charges(self):
"""
set the charge, mass and the atomic makeup for each molecule
"""
mol_config = [] # [ [atom id1, atom id2, ...], ... ]
mol_masses = [] # [ [atom mass1, atom mass2, ...], ... ]
# mol_charges = []
unique_atomic_masses = np.array(self.lammps_data.atomic_masses)[:, 1]
atoms_data = np.array(self.lammps_data.atoms_data)
mol_ids = atoms_data[:, 1].astype(np.int64)
atom_ids = atoms_data[:, 0].astype(np.int64)
unique_mol_ids = np.unique(mol_ids)
atomic_types = atoms_data[:, 2].astype(np.int64)
atomic_masses = unique_atomic_masses[atomic_types - 1]
# atomic_charges = atoms_data[:, 3]
self.nmols = unique_mol_ids.size
for umid in range(self.nmols):
mol_config.append(atom_ids[np.where(mol_ids == umid + 1)] - 1)
mol_masses.append(atomic_masses[np.where(mol_ids == umid + 1)])
# mol_charges.append(np.sum(atomic_charges[np.where(mol_ids == umid+1)]))
self.mol_config = np.array(mol_config)
self.mol_masses = np.array(mol_masses)
def _weighted_average(self, mol_id, mol_vector):
"""
Calculate the weighted average of the array comprising of
atomic vectors corresponding to the molecule with id mol_id.
Args:
mol_id (int): molecule id
mol_vector (numpy array): array of shape,
natoms_in_molecule with id mol_id x 3
Returns:
1D numpy array(3 x 1) of weighted averages in x, y, z directions
"""
mol_masses = self.mol_masses[mol_id]
return np.array([np.dot(mol_vector[:, dim], mol_masses) / np.sum(mol_masses)
for dim in range(3)])
def _get_mol_vector(self, step, mol_id, param=["x", "y", "z"]):
"""
Returns numpy array corresponding to atomic vectors of parameter
"param" for the given time step and molecule id
Args:
step (int): time step
mol_id (int): molecule id
param (list): the atomic parameter for which the weighted
average is to be computed
Returns:
2D numpy array(natoms_in_molecule x 3) of atomic vectors
"""
begin = step * self.natoms
end = (step + 1) * self.natoms
mol_vector_structured = \
self.trajectory[begin:end][self.mol_config[mol_id]][param]
new_shape = mol_vector_structured.shape + (-1,)
mol_vector = mol_vector_structured.view(np.float64).reshape(new_shape)
return mol_vector.copy()
# TODO: remove this and use only get_displacements(an order of magnitude faster)
def get_structures_from_trajectory(self):
"""
Convert the coordinates in each time step to a structure(boxed molecule).
Used to construct DiffusionAnalyzer object.
Returns:
list of Structure objects
"""
structures = []
mass_to_symbol = dict(
(round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
unique_atomic_masses = np.array(self.lammps_data.atomic_masses)[:, 1]
for step in range(self.timesteps.size):
begin = step * self.natoms
end = (step + 1) * self.natoms
mol_vector_structured = \
self.trajectory[begin:end][:][["x", "y", "z"]]
new_shape = mol_vector_structured.shape + (-1,)
mol_vector = mol_vector_structured.view(np.float64).reshape(
new_shape)
coords = mol_vector.copy()
species = [mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
for atype in self.trajectory[begin:end][:]["atom_type"]]
mol = Molecule(species, coords)
try:
boxed_mol = mol.get_boxed_structure(*self.box_lengths)
except ValueError as error:
print("Error: '{}' at timestep {} in the trajectory".format(
error,
int(self.timesteps[step])))
# skip this timestep: boxed_mol is not defined when boxing fails
continue
structures.append(boxed_mol)
return structures
def get_displacements(self):
"""
Return the initial structure and displacements for each time step.
Used to interface with the DiffusionAnalyzer.
Returns:
Structure object, numpy array of displacements
"""
lattice = Lattice([[self.box_lengths[0], 0, 0],
[0, self.box_lengths[1], 0],
[0, 0, self.box_lengths[2]]])
mass_to_symbol = dict(
(round(y["Atomic mass"], 1), x) for x, y in _pt_data.items())
unique_atomic_masses = np.array(self.lammps_data.atomic_masses)[:, 1]
frac_coords = []
for step in range(self.timesteps.size):
begin = step * self.natoms
end = (step + 1) * self.natoms
mol_vector_structured = \
self.trajectory[begin:end][:][["x", "y", "z"]]
new_shape = mol_vector_structured.shape + (-1,)
mol_vector = mol_vector_structured.view(np.float64).reshape(
new_shape)
coords = mol_vector.copy()
if step == 0:
species = [
mass_to_symbol[round(unique_atomic_masses[atype - 1], 1)]
for atype in self.trajectory[begin:end][:]["atom_type"]]
mol = Molecule(species, coords)
structure = mol.get_boxed_structure(*self.box_lengths)
step_frac_coords = [lattice.get_fractional_coords(crd)
for crd in coords]
frac_coords.append(np.array(step_frac_coords)[:, None])
frac_coords = np.concatenate(frac_coords, axis=1)
dp = frac_coords[:, 1:] - frac_coords[:, :-1]
dp = dp - np.round(dp)
f_disp = np.cumsum(dp, axis=1)
disp = lattice.get_cartesian_coords(f_disp)
return structure, disp
def get_diffusion_analyzer(self, specie, temperature, time_step, step_skip,
smoothed=None, min_obs=30, avg_nsteps=1000):
"""
Args:
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
For the other parameters please see the
pymatgen.analysis.diffusion_analyzer.DiffusionAnalyzer documentation.
Returns:
DiffusionAnalyzer
"""
# structures = self.get_structures_from_trajectory()
structure, disp = self.get_displacements()
return DiffusionAnalyzer(structure, disp, specie, temperature,
time_step, step_skip=step_skip, smoothed=smoothed,
min_obs=min_obs, avg_nsteps=avg_nsteps)
@property
def natoms(self):
return self.lammps_data.natoms
@property
def box_lengths(self):
return [l[1] - l[0] for l in self.lammps_data.box_size]
@property
def traj_timesteps(self):
"""
trajectory time steps in time units.
e.g. for units = real, the time unit is femtoseconds (fs)
"""
return self.timesteps * self.lammps_log.timestep
@property
def mol_trajectory(self):
"""
Compute the weighted average trajectory of each molecule at each
timestep
Returns:
3D numpy array (n_timesteps x nmols x 3)
"""
traj = []
for step in range(self.timesteps.size):
tmp_mol = []
for mol_id in range(self.nmols):
mol_coords = self._get_mol_vector(step, mol_id,
param=["x", "y", "z"])
# take care of periodic boundary conditions
pbc_wrap(mol_coords, self.box_lengths)
tmp_mol.append(self._weighted_average(mol_id, mol_coords))
traj.append(tmp_mol)
return np.array(traj)
@property
def mol_velocity(self):
"""
Compute the weighted average velocity of each molecule at each
timestep.
Returns:
3D numpy array (n_timesteps x nmols x 3)
"""
velocity = []
for step in range(self.timesteps.size):
tmp_mol = []
for mol_id in range(self.nmols):
mol_velocities = self._get_mol_vector(step, mol_id,
param=["vx", "vy", "vz"])
tmp_mol.append(self._weighted_average(mol_id, mol_velocities))
velocity.append(tmp_mol)
return np.array(velocity)
class LammpsLog(object):
"""
Parser for LAMMPS log file.
"""
def __init__(self, log_file="log.lammps"):
"""
Args:
log_file (string): path to the log file
"""
self.log_file = log_file
self._parse_log()
def _parse_log(self):
"""
Parse the log file for the thermodynamic data.
Sets the thermodynamic data as a structured numpy array with field names
taken from the thermo_style command.
"""
thermo_data = []
thermo_pattern = None
with open(self.log_file, 'r') as logfile:
for line in logfile:
# timestep, the unit depends on the 'units' command
time = re.search('timestep\s+([0-9]+)', line)
if time and not thermo_data:
self.timestep = float(time.group(1))
# total number md steps
steps = re.search('run\s+([0-9]+)', line)
if steps and not thermo_data:
self.nmdsteps = int(steps.group(1))
# logging interval
thermo = re.search('thermo\s+([0-9]+)', line)
if thermo and not thermo_data:
self.interval = float(thermo.group(1))
# thermodynamic data, set by the thermo_style command
format = re.search('thermo_style.+', line)
if format and not thermo_data:
fields = format.group().split()[2:]
thermo_pattern_string = "\s*([0-9eE\.+-]+)" + "".join(
["\s+([0-9eE\.+-]+)" for _ in range(len(fields) - 1)])
thermo_pattern = re.compile(thermo_pattern_string)
if thermo_pattern:
if thermo_pattern.search(line):
m = thermo_pattern.search(line)
thermo_data.append(
tuple([float(x) for i, x in enumerate(m.groups())]))
thermo_data_dtype = np.dtype([(str(fld), np.float64) for fld in fields])
self.thermo_data = np.array(thermo_data, dtype=thermo_data_dtype)
def pbc_wrap(array, box_lengths):
"""
wrap the array for molecule coordinates around the periodic boundary.
Args:
array (numpy.ndarray): molecule coordinates, [[x1,y1,z1],[x2,y2,z2],..]
box_lengths (list): [x_length, y_length, z_length]
"""
ref = array[0, 0]
for i in range(3):
array[:, i] = np.where((array[:, i] - ref) >= box_lengths[i] / 2,
array[:, i] - box_lengths[i], array[:, i])
array[:, i] = np.where((array[:, i] - ref) < -box_lengths[i] / 2,
array[:, i] + box_lengths[i], array[:, i])
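# Illustrative call (arrays are made up), as used in LammpsRun.mol_trajectory:
# coords = np.array([[9.9, 0.1, 0.2], [0.2, 0.3, 0.1]])
# pbc_wrap(coords, [10.0, 10.0, 10.0])  # modifies coords in place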
|
aykol/pymatgen
|
pymatgen/io/lammps/output.py
|
Python
|
mit
| 16,639
|
[
"LAMMPS",
"pymatgen"
] |
65599feaeaa3deea68020337f0f7612a1309ae39d2a733c98468313c3d3e49d3
|
# -*- coding: utf-8 -*-
'''
This file is part of Arcte.
Arcte is software that simplifies the creation of 3D printable atomic structures,
with added features for the blind.
Copyright (C) 2015 Jesse Smith and contributors
Arcte is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Arcte is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Arcte. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
from __future__ import print_function
from qt4 import *
from vtk import *
import copy
class ExportDialog(QDialog):
def __init__(self,fileType,glyphs,parent=None):
super(ExportDialog,self).__init__(parent)
self.fileType = fileType
self.operation = None
self.brailleGlyphs = glyphs
self.fileInfo = QFileDialog().getSaveFileNameAndFilter(parent=self, filter="*."+self.fileType)
self.booleanOpt = QRadioButton("Boolean union all objects to single mesh (single file, single mesh).",parent=self)
self.booleanOpt.setChecked(True)
self.mergedOpt = QRadioButton("Place all objects into single file (single file, multiple meshes).",parent=self)
# self.mergedOpt.setChecked(True)
self.separateOpt = QRadioButton("Save separate file for each object (multiple files, multiple meshes).",parent=self)
if self.fileInfo[0]:
self.dialog = self.getOptions()
self.dialog.show()
def getOptions(self):
optionsDialog = QDialog(parent = self)
optionsDialog.setModal(True)
optionsDialog.setWindowTitle("Options")
layout = QVBoxLayout()
optionsDialog.setLayout(layout)
group1 = QGroupBox('Output format')
layout.addWidget(group1)
g1Layout = QVBoxLayout()
group1.setLayout(g1Layout)
g1Layout.addWidget(self.mergedOpt)
g1Layout.addWidget(self.booleanOpt)
g1Layout.addWidget(self.separateOpt)
group2 = QGroupBox('Output scale')
layout.addWidget(group2)
g2Layout = QVBoxLayout()
group2.setLayout(g2Layout)
scaleLayout = QHBoxLayout()
scaleLbl = QLabel('Scale factor:')
scaleLayout.addWidget(scaleLbl)
self.scaleSpin = QDoubleSpinBox()
self.scaleSpin.setDecimals(5)
self.scaleSpin.setValue(1.0)
self.scaleSpin.setMinimum(0.00001)
self.scaleSpin.setSingleStep(0.01)
self.scaleSpin.valueChanged.connect(self.scaleChanged)
scaleLayout.addWidget(self.scaleSpin)
g2Layout.addLayout(scaleLayout)
bounds = self.parent().ren.ComputeVisiblePropBounds()
self.ybound = bounds[3] - bounds[2]
self.xbound = bounds[1] - bounds[0]
self.zbound = bounds[5] - bounds[4]
self.xlabel = QLabel('Length (x) = '+str(self.xbound*self.scaleSpin.value())+' a.u.')
self.ylabel = QLabel('Width (y) = '+str(self.ybound*self.scaleSpin.value())+' a.u.')
self.zlabel = QLabel('Height (z) = '+str(self.zbound*self.scaleSpin.value())+' a.u.')
g2Layout.addWidget(self.xlabel)
g2Layout.addWidget(self.ylabel)
g2Layout.addWidget(self.zlabel)
buttonbox = QDialogButtonBox(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
layout.addWidget(buttonbox)
buttonbox.accepted.connect(self.ACCEPT)
buttonbox.rejected.connect(self.REJECT)
return optionsDialog
def scaleChanged(self,value):
self.xlabel.setText('Length (x) = '+str(self.xbound*value))
self.ylabel.setText('Width (y) = '+str(self.ybound*value))
self.zlabel.setText('Height (z) = '+str(self.zbound*value))
def ACCEPT(self):
if self.booleanOpt.isChecked():
operation = "boolean"
elif self.mergedOpt.isChecked():
operation = "merged"
elif self.separateOpt.isChecked():
operation = "separated"
self.dialog.close()
ExportProgress(self.parent().atoms,
self.parent().bonds,
self.brailleGlyphs,
self.parent().ren,
self.fileInfo,
operation,
self.scaleSpin.value(),
parent=self.parent())
self.accept()
def REJECT(self):
self.dialog.close()
self.reject()
class ExportProgress(QProgressDialog):
def __init__(self,*args,**kwargs):
super(ExportProgress,self).__init__(**kwargs)
self.setModal(True)
self.atoms = args[0]
self.bonds = args[1]
self.braille = args[2]
self.ren = args[3]
self.fileInfo = args[4]
self.operation = args[5]
self.scale = args[6]
atoms = [self.atoms[i][3] for i in range(len(self.atoms))]
bonds = [self.bonds[i][1] for i in range(len(self.bonds))]
# glyphs = [self.braille[i][1].dots for i in range(len(self.braille))] +\
# [self.braille[i][1].cyl for i in range(len(self.braille))]
glyphs = [self.braille[i][1].dotsTransform for i in range(len(self.braille))] + [self.braille[i][1].cyl for i in range(len(self.braille))]
print(glyphs)
#self.objs = atoms+bonds+glyphs+[self.braille[0][1].test]
self.objs = atoms+bonds+glyphs
maxRng = len(self.atoms) + len(self.bonds) + len(self.braille)
self.setUpdatesEnabled(True)
self.setAutoClose(False)
self.setAutoReset(False)
self.setWindowTitle("Exporting to "+self.fileInfo[0]+self.fileInfo[1])
self.resize(QSize(700,self.height()))
self.setCancelButtonText("Abort")
self.setRange(0,maxRng)
self.setModal(True)
self.setMinimumDuration(1.5)
self.outThread = ExportUtil(self.atoms,
self.bonds,
self.braille,
self.objs,
self.ren,
self.fileInfo,
self.operation,
self.scale)
QObject.connect(self.outThread,SIGNAL("progress"),self.setValue,Qt.AutoConnection)
self.connect(self.outThread,SIGNAL("progressLabel"),self.setLabelText,Qt.AutoConnection)
self.connect(self.outThread,SIGNAL('closeProgress'),self.close,Qt.AutoConnection)
self.connect(self.outThread,SIGNAL('buttonLabel'),self.setCancelButtonText,Qt.AutoConnection)
self.canceled.connect(self.cancel)
self.outThread.start()
self.exec_()
def cancel(self):
if self.outThread.isRunning():
self.outThread.exit()
class ExportUtil(QThread):
def __init__(self,*args):
super(ExportUtil,self).__init__()
self.atoms = args[0]
self.bonds = args[1]
self.glyphs = args[2]
self.objs = args[3]
self.ren = args[4]
self.fileInfo = args[5]
self.operation = args[6]
self.scale = args[7]
self.maxRng = len(self.atoms) + len(self.bonds) + 2*len(self.glyphs)
#print(self.glyphs)
#for i in self.objs:
# print(i)
# self.actualglyphs = []
# for i in self.glyphs:
# for j in i[1].center:
# sph = vtkSphereSource()
# sph.SetRadius(i[1].radius)
# sph.SetCenter(j[0],j[1],j[2])
#
# for i in self.glyphs:
# print('--- ',i[1].radius)
# for j in i[1].center:
# print(j)
#print(i[1].dots.GetOutput().ComputeBounds())
def run(self):
if self.operation == "separated":
if self.fileInfo[1][1:] == ".stl":
anum = 0
for i in self.objs:
anum += 1
fileName = self.fileInfo[0]+str(anum)+self.fileInfo[1][1:]
writer = vtkSTLWriter()
writer.SetFileName(fileName)
writer.SetInputConnection(i.GetOutputPort())
writer.Write()
# elif self.fileInfo[1][1:] == ".obj":
# anum = 0
# for i in self.atoms:
# anum += 1
# filePrefix = self.fileInfo[0]+str(anum)
#
# ren = vtkRenderer()
# renwin=vtkRenderWindow()
# renwin.AddRenderer(ren)
# ren.AddActor(i[1])
#
# writer = vtkOBJExporter()
# writer.SetFilePrefix(filePrefix)
# writer.SetInput(renwin)
# writer.Write()
#
# elif self.fileInfo[1][1:] == ".vrml":
# anum = 0
# for i in self.atoms:
# anum += 1
# fileName = self.fileInfo[0]+str(anum)+self.fileInfo[1][1:]
#
# ren = vtkRenderer()
# renwin=vtkRenderWindow()
# renwin.AddRenderer(ren)
# ren.AddActor(i[1])
#
# writer = vtkVRMLExporter()
# writer.SetFileName(fileName)
# writer.SetInput(renwin)
# writer.Write()
# for i in self.bonds:
# anum += 1
# fileName = self.fileInfo[0]+str(anum)+self.fileInfo[1][1:]
#
# ren = vtkRenderer()
# renwin=vtkRenderWindow()
# renwin.AddRenderer(ren)
# ren.AddActor(i[0])
#
# writer = vtkVRMLExporter()
# writer.SetFileName(fileName)
# writer.SetInput(renwin)
# writer.Write()
QCoreApplication.processEvents()
elif self.operation == "merged":
if self.fileInfo[1][1:] == ".stl":
writer = vtkSTLWriter()
if not self.fileInfo[0][-4:] == '.stl':
writer.SetFileName(self.fileInfo[0]+self.fileInfo[1][1:])
elif self.fileInfo[0][-4:] == '.stl':
writer.SetFileName(self.fileInfo[0])
newData = vtkAppendPolyData()
for i in self.objs:
i.Update()
newData.AddInputConnection(i.GetOutputPort())
transform = vtkTransform()
transform.Scale(self.scale,self.scale,self.scale)
transPoly = vtkTransformPolyDataFilter()
transPoly.SetTransform(transform)
transPoly.SetInputConnection(newData.GetOutputPort())
transPoly.Update()
writer.SetInputConnection(transPoly.GetOutputPort())
writer.Write()
# elif self.fileInfo[1][1:] == ".obj":
# ren = vtkRenderer()
# outRenWindow = vtkRenderWindow()
# outRenWindow.AddRenderer(ren)
# atomActors = [i[1] for i in self.atoms]
# bondActors = [i[0] for i in self.bonds]
# glyphActors = [i.actors[0] for i in self.glyphs]+\
# [i.actors[1] for i in self.glyphs]
#
# for i in atomActors:
# data = i.GetMapper().GetInput()
# transform = vtkTransform()
# transform.Scale(self.scale,self.scale,self.scale)
#
# transPoly = vtkTransformPolyDataFilter()
# transPoly.SetTransform(transform)
# transPoly.SetInputData(data)
# transPoly.Update()
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(i)
# for i in bondActors:
# data = i.GetMapper().GetInput()
# transform = vtkTransform()
# transform.Scale(self.scale,self.scale,self.scale)
#
# transPoly = vtkTransformPolyDataFilter()
# transPoly.SetTransform(transform)
# transPoly.SetInputData(data)
# transPoly.Update()
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(i)
# for i in glyphActors:
# data = i.GetMapper().GetInput()
# transform = vtkTransform()
# transform.Scale(self.scale,self.scale,self.scale)
#
# transPoly = vtkTransformPolyDataFilter()
# transPoly.SetTransform(transform)
# transPoly.SetInputData(data)
# transPoly.Update()
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(i)
#
# writer = vtkOBJExporter()
# writer.SetFilePrefix(self.fileInfo[0][:-4])
# writer.SetInput(outRenWindow)
# writer.Write()
# elif self.fileInfo[1][1:] == ".vrml":
# ren = vtkRenderer()
# outRenWindow = vtkRenderWindow()
# outRenWindow.AddRenderer(ren)
# atomActors = (i[1] for i in self.atoms)
# bondActors = (i[0] for i in self.bonds)
# glyphActors = [i.actors[0] for i in self.glyphs]+\
# [i.actors[1] for i in self.glyphs]
#
# for i in atomActors:
# data = i.GetMapper().GetInput()
# transform = vtkTransform()
# transform.Scale(self.scale,self.scale,self.scale)
#
# transPoly = vtkTransformPolyDataFilter()
# transPoly.SetTransform(transform)
# transPoly.SetInputData(data)
# transPoly.Update()
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(i)
# for i in bondActors:
# data = i.GetMapper().GetInput()
# transform = vtkTransform()
# transform.Scale(self.scale,self.scale,self.scale)
#
# transPoly = vtkTransformPolyDataFilter()
# transPoly.SetTransform(transform)
# transPoly.SetInputData(data)
# transPoly.Update()
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(i)
# for i in glyphActors:
# data = i.GetMapper().GetInput()
# transform = vtkTransform()
# transform.Scale(self.scale,self.scale,self.scale)
#
# transPoly = vtkTransformPolyDataFilter()
# transPoly.SetTransform(transform)
# transPoly.SetInputData(data)
# transPoly.Update()
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(i)
#
# writer = vtkVRMLExporter()
# writer.SetFileName(self.fileInfo[0])
# writer.SetInput(outRenWindow)
# writer.Write()
QCoreApplication.processEvents()
elif self.operation == "boolean":
self.emit(SIGNAL('progressLabel'),'Applying boolean operations. This may take some time ...')
boolStruct = vtkBooleanOperationPolyDataFilter()
boolStruct.SetOperationToUnion()
boolStruct.SetInputConnection(0,self.objs[0].GetOutputPort())
boolStruct.SetInputConnection(1,self.objs[1].GetOutputPort())
boolStruct.Update()
currVal = 1
self.emit(SIGNAL('progress'),currVal)
QCoreApplication.processEvents()
self.msleep(10)
for i in range(2,len(self.objs)):
NewBoolStruct = vtkBooleanOperationPolyDataFilter()
NewBoolStruct.SetOperationToUnion()
NewBoolStruct.SetInputConnection(0,boolStruct.GetOutputPort())
NewBoolStruct.SetInputConnection(1,self.objs[i].GetOutputPort())
# NewBoolStruct.Update()
clean = vtkCleanPolyData()
clean.SetInputConnection(NewBoolStruct.GetOutputPort())
# clean.Update()
normals = vtkPolyDataNormals()
# normals.SetInputConnection(NewBoolStruct.GetOutputPort())
normals.SetInputConnection(clean.GetOutputPort())
# normals.SetInputConnection(repair.GetOutputPort())
normals.ConsistencyOff()
normals.SplittingOn()
normals.ComputeCellNormalsOn()
normals.ComputePointNormalsOn()
normals.Update()
boolStruct = normals
currVal += 1
self.emit(SIGNAL('progress'),currVal)
QCoreApplication.processEvents()
self.msleep(10)
transform = vtkTransform()
transform.Scale(self.scale,self.scale,self.scale)
transPoly = vtkTransformPolyDataFilter()
transPoly.SetTransform(transform)
transPoly.SetInputConnection(boolStruct.GetOutputPort())
transPoly.Update()
if self.fileInfo[1][1:] == ".stl":
writer = vtkSTLWriter()
if not self.fileInfo[0][-4:] == '.stl':
writer.SetFileName(str(self.fileInfo[0]+self.fileInfo[1][1:]))
elif self.fileInfo[0][-4:] == '.stl':
writer.SetFileName(str(self.fileInfo[0]))
# writer.SetInputConnection(normals.GetOutputPort())
# writer.SetInputConnection(boolStruct.GetOutputPort())
writer.SetInputConnection(transPoly.GetOutputPort())
# writer.SetInputConnection(repair.GetOutputPort())
writer.Write()
# elif self.fileInfo[1][1:] == ".obj":
# ren = vtkRenderer()
# outRenWindow = vtkRenderWindow()
# outRenWindow.AddRenderer(ren)
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(actor)
# writer = vtkOBJExporter()
# writer.SetFilePrefix(self.fileInfo[0][:-4])
# writer.SetInput(outRenWindow)
# writer.Write()
# elif self.fileInfo[1][1:] == ".vrml":
# ren = vtkRenderer()
# outRenWindow = vtkRenderWindow()
# outRenWindow.AddRenderer(ren)
# mapper = vtkPolyDataMapper()
# mapper.SetInputConnection(transPoly.GetOutputPort())
# actor = vtkActor()
# actor.SetMapper(mapper)
# ren.AddActor(actor)
# writer = vtkVRMLExporter()
# writer.SetFileName(self.fileInfo[0])
# writer.SetInput(outRenWindow)
# writer.Write()
self.emit(SIGNAL('progress'),self.maxRng)
self.emit(SIGNAL('buttonLabel'),'Close')
self.emit(SIGNAL('progressLabel'),'Completed.')
self.exit()
self.exec_()
|
jessedsmith/Arcte
|
modules/exportdialog.py
|
Python
|
gpl-3.0
| 21,348
|
[
"VTK"
] |
fec14b55a12bc33a51a5a7985f2cc8b95f540a97d180dee6d82933fd676f72c5
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.misc.ned Contains the NED class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import re
from collections import defaultdict
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Import astronomical modules
from astroquery.ned import Ned
from astroquery import nasa_ads as ads
# Import the relevant PTS classes and modules
from ...core.basics.log import log
from ...core.tools import filesystem as fs
from ...core.filter.filter import parse_filter
from ...core.filter.narrow import NarrowBandFilter
from ...core.basics.configurable import Configurable
from ...core.tools import formatting as fmt
from ...core.tools import network
from ...core.tools import introspection
# -----------------------------------------------------------------
def get_image(galaxy_name, fltr, year=None):
"""
This function ...
:return:
"""
filter_name = str(fltr)
# Inform the user
log.info("Looking for images of '" + galaxy_name + "' in the '" + filter_name + "' band ...")
# Configure
ned = NED()
ned.config.galaxy = galaxy_name
ned.config.filter = filter_name
ned.config.unknown = False
ned.config.show = False
# Run
ned.run()
#
if year is None:
last_year = None
for bibcode, image_year, image_url in ned.images[filter_name]:
if last_year is None or image_year > last_year: last_year = image_year
year = last_year
# Debugging
log.debug("Most recent image encountered is from " + str(year))
#print(ned.images.keys())
#print(ned.images[filter_name])
# List of possible urls
urls = []
# Look in the found images
for bibcode, image_year, image_url in ned.images[filter_name]:
if image_year == year: urls.append(image_url)
# Check the number of urls
if len(urls) == 0: raise RuntimeError("No images found")
if len(urls) > 1: log.warning("Multiple images found: taking the first")
url = urls[0]
# Download to temporary path
#filepath = network.download_and_decompress_file(url, introspection.pts_temp_dir, remove=True, progress_bar=True)
filepath = network.download_file(url, introspection.pts_temp_dir, progress_bar=True)
# RENAME: REMOVE THE .GZ!
filepath = fs.remove_extension(filepath)
from ..core.frame import Frame
# Open the image
frame = Frame.from_file(filepath)
# Remove the file
fs.remove_file(filepath)
# Set the filter
frame.filter = fltr
# Return the frame
return frame
# -----------------------------------------------------------------
class NED(Configurable):
"""
This class ..
"""
def __init__(self, *args, **kwargs):
"""
This function ...
:param kwargs:
"""
# Call the constructor of the base class
super(NED, self).__init__(*args, **kwargs)
# Image info
self.images = defaultdict(list)
# Images of unknown filters
self.unknown = []
# A regular expression object that strips away special unicode characters, used on the remote console output
self.ansi_escape = re.compile(r'\x1b[^m]*m')
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Find the images
self.find()
# 3. List the images
if self.config.show: self.show()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(NED, self).setup(**kwargs)
# -----------------------------------------------------------------
def find(self):
"""
This function ...
:return:
"""
# Get the list
urls = Ned.get_image_list(self.config.galaxy)
images = []
# Parse each image URL in the list
for url in urls:
# Get the name
name = fs.strip_extension(fs.strip_extension(fs.name(url))) # strip both the .gz as the .fits extension
# Get the bibcode
try: bibcode = url.split("img/")[1].split("/")[0]
except IndexError: bibcode = None
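# The parsing below is heuristic: names containing ':' are assumed to encode
# galaxy/info/band/source fields, names with '_' put the band in the last
# component, and a plain dotted name yields no band information.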
if ":" in name:
splitted = name.split(":")
if splitted[0].startswith("NGC_"):
band = splitted[0].split("NGC_")[1][5:]
try:
filter = parse_filter(band)
splitted = [self.config.galaxy, None, band, splitted[1]]
except: pass
if len(splitted) == 3:
splitted = [self.config.galaxy, None, splitted[1], splitted[2]]
elif len(splitted) == 2:
info_and_band = splitted[0].split("NGC_")[1][5:]
splitted = [self.config.galaxy, None, info_and_band, splitted[1]]
galaxy_name = splitted[0]
unknown = splitted[1]
band = splitted[2]
source = splitted[3]
try:
year = int(source[-4:])
if year < 1985: continue
except ValueError: year = None
images.append((band, year, bibcode, url))
elif "_" in name:
splitted = name.split("_")
band = splitted[-1]
images.append((band, None, bibcode, url))
elif "." in name:
splitted = name.split(".")
galaxy_name = splitted[0]
images.append((None, None, bibcode, url))
# Print
for band, year, bibcode, url in images:
if band is None: fltr = None
elif "Ha" in band or "H-alpha" in band or "H_alph" in band: fltr = NarrowBandFilter("Ha")
else:
try: fltr = parse_filter(band)
except ValueError: fltr = None
#print(fltr, year, bibcode, url)
if fltr is None:
self.unknown.append((bibcode, year, url))
else:
fltrstring = str(fltr)
# Add to the images dictionary
self.images[fltrstring].append((bibcode, year, url))
# -----------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing the list of images ...")
# A specific filter was specified
if self.config.filter is not None:
if str(self.config.filter) not in self.images: log.error("No images for this filter are found")
else:
print(fmt.green + fmt.bold + "Found images: (" + str(len(self.images[str(self.config.filter)])) + ")" + fmt.reset)
print("")
for bibcode, year, url in self.images[str(self.config.filter)]:
name = fs.name(url)
results = ads.ADS.query_simple(bibcode)
if len(results) == 0:
log.warning("Getting the info for BIBCODE " + str(bibcode) + " failed")
authorstring = None
title = None
journal = None
citations = None
else:
authors = results["authors"][0]
if len(authors) == 1: authorstring = authors[0]
elif len(authors) == 2: authorstring = " and ".join(authors)
else: authorstring = authors[0] + " et al."
title = results["title"][0][0]
journal = results["journal"][0][0].split(",")[0]
try: citations = results["citations"][0][0]
except IndexError: citations = None
print(fmt.underlined + name + fmt.reset)
print("")
if year is not None: print(" * year:", year)
if title is not None: print(" * title:", title)
if journal is not None: print(" * journal:", journal)
if citations is not None: print(" * citations:", citations)
if authorstring is not None: print(" * authors:", authorstring)
print(" * url:", url)
print("")
else:
# List known
self.list_filters()
# List unknown
if self.config.unknown: self.list_unknown()
# -----------------------------------------------------------------
def list_filters(self):
"""
This function ...
:return:
"""
# Loop over the filters
for fltrstring in self.sorted_filter_names:
print(fmt.bold + fmt.green + fltrstring + ": (" + str(len(self.images[fltrstring])) + ")" + fmt.reset)
print("")
for bibcode, year, url in self.images[fltrstring]:
name = fs.name(url)
results = ads.ADS.query_simple(bibcode)
if len(results) == 0:
log.warning("Getting the info for BIBCODE " + str(bibcode) + " failed")
authorstring = None
title = None
journal = None
citations = None
else:
authors = results["authors"][0]
if len(authors) == 1: authorstring = authors[0]
elif len(authors) == 2: authorstring = " and ".join(authors)
else: authorstring = authors[0] + " et al."
title = results["title"][0][0]
journal = results["journal"][0][0].split(",")[0]
try: citations = results["citations"][0][0]
except IndexError: citations = None
print(fmt.underlined + name + fmt.reset)
print("")
if year is not None: print(" * year:", year)
if title is not None: print(" * title:", title)
if journal is not None: print(" * journal:", journal)
if citations is not None: print(" * citations:", citations)
if authorstring is not None: print(" * authors:", authorstring)
print(" * url:", url)
print("")
# -----------------------------------------------------------------
def list_unknown(self):
"""
This function ...
:return:
"""
print(fmt.red + fmt.bold + "Unknown filters: (" + str(len(self.unknown)) + ")" + fmt.reset)
print("")
for bibcode, year, url in self.unknown:
name = fs.name(url)
results = ads.ADS.query_simple(bibcode)
if len(results) > 0:
authors = results["authors"][0]
if len(authors) == 1: authorstring = authors[0]
elif len(authors) == 2: authorstring = " and ".join(authors)
else: authorstring = authors[0] + " et al."
title = results["title"][0][0]
journal = results["journal"][0][0].split(",")[0]
try: citations = results["citations"][0][0]
except IndexError: citations = None
else: title = journal = citations = authorstring = None
#print(type(title))
#print(str(title))
#print(self.ansi_escape.sub('', str(title)).replace('\x1b[K', '').split("\r\n")[1:-1])
#print(title.replace("\u2014", "")) if title is not None else print("")
#print(title.astype('U')) if title is not None else print("")
#print(title.encode('utf-8')) if title is not None else print("")
print(fmt.underlined + name + fmt.reset)
print("")
if year is not None: print(" * year:", year)
if title is not None: print(" * title:", title)
if journal is not None: print(" * journal:", journal)
if citations is not None: print(" * citations:", citations)
if authorstring is not None: print(" * authors:", authorstring)
print(" * url:", url)
print("")
# -----------------------------------------------------------------
@property
def sorted_filter_names(self):
"""
This function ...
:return:
"""
names = sorted(self.images.keys(), key=lambda key: parse_filter(key).pivot)
return names
# -----------------------------------------------------------------
|
SKIRT/PTS
|
magic/services/ned.py
|
Python
|
agpl-3.0
| 13,315
|
[
"Galaxy"
] |
be174855715aef76470749019f04e0ba5412d908e251c8501b27cd201a4e40db
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import unittest
import MooseDocs
from MooseDocs.common.MooseLinkDatabase import MooseLinkDatabase
class TestMooseLinkDatabase(unittest.TestCase):
"""
Tests for MooseLinkDatabase object.
"""
@classmethod
def setUpClass(cls):
"""
Create link database.
"""
config = MooseDocs.load_config(os.path.join(MooseDocs.MOOSE_DIR, 'docs', 'website.yml'))
options = config['MooseDocs.extensions.app_syntax']
cls.database = MooseLinkDatabase(repo=options['repo'], links=options['links'])
def testTests(self):
"""
Look for class in input files.
"""
# The BoxMarker object is something that is nested and not usually listed first, so it is a
# good test case that the regex is getting down into the nested items.
self.assertIn('BoxMarker', self.database.inputs['Tests'],
'BoxMarker not located in database!')
def testExamples(self):
"""
The ExampleDiffusion class should be in input files and Kernel inherited from.
"""
self.assertIn('ExampleDiffusion', self.database.inputs['Examples'],
'ExampleDiffusion was not found in example input files!')
self.assertIn('Kernel', self.database.children['Examples'],
'Kernel was not used in the example source code!')
def testTutorials(self):
"""
Test the tutorials directory is properly searched.
"""
self.assertIn('DarcyPressure', self.database.inputs['Tutorials'],
'DarcyPressure was not found in tutorial input files!')
self.assertIn('Diffusion', self.database.children['Tutorials'],
'Diffusion was not used in the tutorial source code!')
def testSource(self):
"""
Test the tutorials directory is properly searched.
"""
self.assertIn('Diffusion', self.database.children['Source'],
'Diffusion was not found in source code.!')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
liuwenf/moose
|
python/MooseDocs/tests/common/database/test_link_database.py
|
Python
|
lgpl-2.1
| 3,508
|
[
"MOOSE"
] |
f101a4104d072d534a419dbba58d7b1af4a9db2da9f4f68fea940d547c52052d
|
#-------------------------------------------------------------------------------
# coding=utf8
# Name: Module 1
# Purpose:
#
# Author: zhx
#
# Created: 19/05/2016
# Copyright: (c) zhx 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import numpy as np
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
def main():
traindata = open("trainnew.txt")
testdata = open("testnew.txt")
traindata.readline() # skip the first line
testdata.readline()
train = np.loadtxt(traindata)
test = np.loadtxt(testdata)
X = train[0:4628,0:27]
y = train[0:4628,27]
test_x = test[0:1437,0:27]
test_y = test[0:1437,27]
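# Train five different classifiers on the same 27 features and compare
# their performance on the held-out test set via classification_report.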
model1 = LinearSVC()
model2 = LogisticRegression()
model3 = GaussianNB()
model4 = RandomForestClassifier()
model5 = KNeighborsClassifier()
model1.fit(X,y)
model2.fit(X,y)
model3.fit(X,y)
model4.fit(X,y)
model5.fit(X,y)
predicted1 = model1.predict(test_x)
predicted2 = model2.predict(test_x)
predicted3 = model3.predict(test_x)
predicted4 = model4.predict(test_x)
predicted5 = model5.predict(test_x)
classname = ['popular','not_popular']
print "1 Svm-linear"
print(classification_report(test_y,predicted1))#,classname))
print "2 Logistci regression"
print(classification_report(test_y,predicted2))#,classname))
print "3 NB - gaussian"
print(classification_report(test_y,predicted3))#,classname))
print "4 Random Forest"
print(classification_report(test_y,predicted4))#,classname))
print "5 KNN"
print(classification_report(test_y,predicted5))#,classname))
main()
|
vimilimiv/weibo-popularity_judge-and-content_optimization
|
分类和回归/svm.py
|
Python
|
mit
| 1,941
|
[
"Gaussian"
] |
ce9c5ddbb3cbd7f34335ee0164263bc4986783574985981b70183a12dbd07f6f
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, variable='diffused', cmap_reverse=True)
cbar = chigger.exodus.ExodusColorBar(mug)
window = chigger.RenderWindow(mug, cbar, size=[300,300], test=True)
window.write('reverse_default.png')
window.start()
|
nuclear-wizard/moose
|
python/chigger/tests/colormap/reverse_default.py
|
Python
|
lgpl-2.1
| 691
|
[
"MOOSE"
] |
047180fb4ff539d9342c0a9212374a0758aa55aceb6ac276ecbbff440f4f0b70
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
import procmemory
# 2018.08: Weave not wrapped
def drawVertices(myscreen, weave, vertexType, vertexRadius, vertexColor):
pts = weave.getVertices( vertexType )
print(" got ",len(pts)," of type ", vertexType)
for p in pts:
myscreen.addActor( camvtk.Sphere(center=(p.x,p.y,p.z), radius=vertexRadius, color=vertexColor ) )
if __name__ == "__main__":
print(ocl.version())
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../stl/demo.stl")
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
myscreen.addActor(stl)
stl.SetWireframe()
#stl.SetSurface()
stl.SetColor(camvtk.grey)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
cutter = ocl.CylCutter(0.3, 5)
#cutter = ocl.BallCutter(0.4, 5)
#cutter = ocl.BullCutter(0.4, 0.1, 5)
print("fiber...")
fiber_range=30
Nmax = 400
yvals = [float(n-float(Nmax)/2)/Nmax*float(fiber_range) for n in range(0,Nmax+1)]
xvals = [float(n-float(Nmax)/2)/Nmax*float(fiber_range) for n in range(0,Nmax+1)]
zvals=[ 1.6523]
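# Build a grid of X- and Y-direction fibers at the fixed Z height; the
# batch push-cutters below push the cutter along each fiber against the STL.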
bpc_x = ocl.BatchPushCutter()
bpc_y = ocl.BatchPushCutter()
bpc_x.setXDirection()
bpc_y.setYDirection()
bpc_x.setSTL(s)
bpc_y.setSTL(s)
bpc_x.setCutter(cutter)
bpc_y.setCutter(cutter)
# create fibers
for zh in zvals:
for y in yvals:
f1 = ocl.Point(-15.5,y,zh) # start point of fiber
f2 = ocl.Point(15.5,y,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
bpc_x.appendFiber(f)
for x in xvals:
f1 = ocl.Point(x,-15.5,zh) # start point of fiber
f2 = ocl.Point(x,15.5,zh) # end point of fiber
f = ocl.Fiber( f1, f2)
bpc_y.appendFiber(f)
# run
bpc_x.run()
bpc_y.run()
xfibers = bpc_x.getFibers()
yfibers = bpc_y.getFibers()
fibers = xfibers+yfibers
print(" got ",len(xfibers)," xfibers")
print(" got ",len(yfibers)," yfibers")
print("rendering fibers and CL-points.")
w = ocl.Weave()
print("push fibers to Weave...",)
for f in fibers:
w.addFiber(f)
print("done.")
print("Weave build()...",)
mem1 = procmemory.resident()
print("before ", mem1)
w.build()
#w.build2()
mem2 = procmemory.resident()
print("after ", float(mem2)/float(1024*1024), " MB")
print(" build() memory: ",float(mem2-mem1)/float(1024*1024)," MB")
print("done")
print("face_traverse...")
w.face_traverse()
print("done.")
w_clpts = w.getCLVertices()
w_ipts = w.getINTVertices()
w_edges = w.getEdges()
w_loop = w.getLoops()
vertexRadius = 0.007
drawVertices(myscreen, w, ocl.WeaveVertexType.CL, vertexRadius, camvtk.red)
drawVertices(myscreen, w, ocl.WeaveVertexType.INT, vertexRadius, camvtk.orange)
drawVertices(myscreen, w, ocl.WeaveVertexType.FULLINT, vertexRadius, camvtk.yellow)
drawVertices(myscreen, w, ocl.WeaveVertexType.ADJ, vertexRadius, camvtk.green)
drawVertices(myscreen, w, ocl.WeaveVertexType.TWOADJ, vertexRadius, camvtk.lblue)
print(" got: ", len(w_edges), " edges")
print(" got: ", len(w_loop), " loops")
# draw the loops
nloop = 0
for lop in w_loop:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=camvtk.yellow) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=camvtk.yellow) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=camvtk.yellow) )
previous=p
n=n+1
print("rendered loop ",nloop, " with ", len(lop), " points")
nloop = nloop+1
# draw edges of weave
ne = 0
zoffset=0.0 # 1
dzoffset = 0.000 # 5
for e in w_edges:
p1 = e[0]
p2 = e[1]
myscreen.addActor( camvtk.Line( p1=( p1.x,p1.y,p1.z+zoffset+ne*dzoffset), p2=(p2.x,p2.y,p2.z+zoffset+ne*dzoffset) ) )
ne = ne+1
print("done.")
myscreen.camera.SetPosition(0.8051, 0.8051, 3.5)
myscreen.camera.SetFocalPoint(0.805, 0.805, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
aewallin/opencamlib
|
examples/python/fiber/fiber_16_weave2_STL.py
|
Python
|
lgpl-2.1
| 4,894
|
[
"VTK"
] |
eedd644e3013a0132a64dfd853ccbb4c9d19d565c7364bd19ac5f4988eed458d
|
# This file is part of OpenHatch.
# Copyright (C) 2010, 2011 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import mysite.base.unicode_sanity
import lxml.html
import urllib
import urllib2
import cStringIO as StringIO
import re
import simplejson
import datetime
import collections
import logging
import urlparse
import hashlib
import xml.etree.ElementTree as ET
import xml.parsers.expat
from django.utils.encoding import force_unicode
import mysite.search.models
import mysite.profile.models
import mysite.base.helpers
import twisted.web
### Generic error handler
class ProfileImporter(object):
SQUASH_THESE_HTTP_CODES = []
def errbackOfLastResort(self, error):
# So the error is something intense -- probably, an actual code bug.
#
# The important thing to do is to log the error somewhere in an attempt to make
# it easier to debug.
#
# Twisted errors have a printDetailedTraceback() method
message = """Uh oh. Asynchronous code hit an unhandled exception.
Here are the details:
"""
buf = StringIO.StringIO()
error.printDetailedTraceback(file=buf)
message += buf.getvalue()
# Send an email to the admins
from django.core.mail import mail_admins
mail_admins(subject="Async error on the site",
message=message,
fail_silently=True)
# fail_silently since exceptions here would be bad.
def squashIrrelevantErrors(self, error):
### The way we set up the callbacks and errbacks, this function should process any
### exceptions raised by the actual callback.
###
### It's important that this function never raise an error.
###
### Once it returns successfully, the DIA processing will continue, and the DIA will
### get marked as "completed". So if this function raises an exception, then the DIA
### will never get marked as "completed", and then users would be staring at progress
### bars for ever and ever!
squash_it = False
if error.type == twisted.web.error.Error:
if error.value.status in self.SQUASH_THESE_HTTP_CODES:
# The username doesn't exist. That's okay. It just means we have gleaned no profile information
# from this query.
squash_it = True
if squash_it:
pass
else:
# This is low-quality logging for now!
logging.warn("EEK: " + error.value.status + " " + error.value.response)
else:
self.errbackOfLastResort(error)
def __init__(self, query, dia_id, command):
## First, store the data we are passed in.
self.query = query
self.dia_id = dia_id
self.command = command
## Then, create a mapping for storing the URLs we are waiting on.
self.urls_we_are_waiting_on = collections.defaultdict(int)
def get_dia(self):
"""We have this method so to avoid holding on to any objects from
the database for a long time.
Event handler methods should use a fresh database object while they
run, rather than using some old object that got created when the class
was instantiated.
"""
return mysite.profile.models.DataImportAttempt.objects.get(
id=self.dia_id)
def markThatTheDeferredFinished(self, url):
self.urls_we_are_waiting_on[url] -= 1
# If we just made the state totally insane, then log a warning to that effect.
if self.urls_we_are_waiting_on[url] < 0:
logging.error("Eeek, " + url + " went negative.")
if self.seems_finished():
# Grab the DataImportAttempt object, and mark it as completed.
dia = self.get_dia()
dia.completed = True
dia.save()
# Finally, if there is more work to do, enqueue it.
self.command.create_tasks_from_dias(max=1)
def seems_finished(self):
if sum(self.urls_we_are_waiting_on.values()) == 0:
return True
return False
def handleError(self, failure):
# FIXME: Use Django default exception logic to make an email get sent.
import logging
logging.warn(failure)
### This section imports projects from github.com
class ImportActionWrapper(object):
# This class serves to hold three things:
# * the URL we requested, and
# * the ProfileImporter object that caused the URL to be requested.
# * Function to call.
#
# The point of this wrapper is that we call that function for you, and
# afterward, we call .markThatTheDeferredFinished() on the ProfileImporter.
#
# That way, the ProfileImporter can update its records of which URLs have finished
# being processed.
def __init__(self, url, pi, fn):
self.url = url
self.pi = pi
self.fn = fn
def __call__(self, *args, **kwargs):
# Okay, so we call fn and pass in arguments to it.
value = self.fn(*args, **kwargs)
# Then we tell the ProfileImporter that we have handled the URL.
self.pi.markThatTheDeferredFinished(self.url)
return value
class GithubImporter(ProfileImporter):
def squashIrrelevantErrors(self, error):
squash_it = False
if error.type == twisted.web.error.Error:
if error.value.status == '404':
# The username doesn't exist. That's okay. It just means we have gleaned no profile information
# from this query.
squash_it = True
if error.value.status == '401' and error.value.response == '{"error":"api route not recognized"}':
# This is what we get when we query e.g. http://github.com/api/v2/json/repos/show/asheesh%40asheesh.org
# It just means that Github decided that [email protected] is not a valid username.
# Just like above -- no data to return.
squash_it = True
if squash_it:
pass
else:
# This is low-quality logging for now!
logging.warn("EEK: " + error.value.status + " " + error.value.response)
else:
raise error.value
# This method takes a repository dict as returned by Github
# and creates a Citation, also creating the relevant
# PortfolioEntry if necessary.
def addCitationFromRepoDict(self, repo_dict, override_contrib=None):
# Get the DIA whose ID we stored
dia = self.get_dia()
person = dia.person
# Get or create a project by this name
(project, _) = mysite.search.models.Project.objects.get_or_create(
name=repo_dict['name'])
# Look and see if we have a PortfolioEntry. If not, create
# one.
if mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project).count() == 0:
portfolio_entry = mysite.profile.models.PortfolioEntry(person=person,
project=project,
project_description=repo_dict['description'] or '')
portfolio_entry.save()
# Either way, it is now safe to get it.
portfolio_entry = mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project)[0]
citation = mysite.profile.models.Citation()
citation.languages = "" # FIXME ", ".join(result['languages'])
# Fill out the "contributor role", either by data we got
# from the network, or by special arguments to this
# function.
if repo_dict['fork']:
citation.contributor_role = 'Forked'
else:
citation.contributor_role = 'Started'
if override_contrib:
citation.contributor_role = override_contrib
citation.portfolio_entry = portfolio_entry
citation.data_import_attempt = dia
citation.url = 'http://github.com/%s/%s/' % (urllib.quote_plus(repo_dict['owner']),
urllib.quote_plus(repo_dict['name']))
citation.save_and_check_for_duplicates()
def handleUserRepositoryJson(self, json_string):
data = simplejson.loads(json_string)
if 'repositories' not in data:
return
repos = data['repositories']
# for every repository, we need to get its primary
# programming language. FIXME.
# For now we skip that.
for repo in repos:
self.addCitationFromRepoDict(repo)
person = self.get_dia().person
person.last_polled = datetime.datetime.now()
person.save()
def getUrlsAndCallbacks(self):
urls_and_callbacks = []
# Well, one thing we can do is get the repositories the user owns.
this_one = {'errback': self.squashIrrelevantErrors}
this_one['url'] = ('http://github.com/api/v2/json/repos/show/' +
mysite.base.unicode_sanity.quote(self.query))
this_one['callback'] = self.handleUserRepositoryJson
urls_and_callbacks.append(this_one)
# Another is look at the user's activity feed.
this_one = {'errback': self.squashIrrelevantErrors}
this_one['url'] = ('http://github.com/%s.json' %
mysite.base.unicode_sanity.quote(self.query))
this_one['callback'] = self.handleUserActivityFeedJson
urls_and_callbacks.append(this_one)
# Another is look at the watched list for repos the user collaborates on
# FIXME
return urls_and_callbacks
def handleUserActivityFeedJson(self, json_string):
# first, decode it
data = simplejson.loads(json_string)
# create a set that we add URLs to. This way, we can avoid
# returning duplicate URLs.
repo_urls_found = set()
for event in data:
if 'repository' not in event:
print 'weird, okay'
continue
repo = event['repository']
# Find "collaborated on..."
if event['type'] == 'PushEvent':
if repo['owner'] != self.query:
## In that case, we need to find out if the given user is in the list of collaborators
## for the repository. Normally I would call out to a different URL, but I'm supposed to
## not block.
## FIXME: return a Deferred I guess.
continue # skip the event for now
# Find "forked..."
elif event['type'] == 'ForkEvent':
if repo['owner'] != self.query:
self.addCitationFromRepoDict(repo, override_contrib='Collaborated on')
elif event['type'] == 'WatchEvent':
continue # Skip this event.
else:
logging.info("When looking in the Github user feed, I found a Github event of unknown type.")
### This section imports package lists from qa.debian.org
SECTION_NAME_AND_NUMBER_SPLITTER = re.compile(r'(.*?) [(](\d+)[)]$')
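# e.g. matches a heading like "main (5)" -> groups ("main", "5")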
# FIXME: Migrate this to UltimateDebianDatabase or DebianDatabaseExport
class DebianQA(ProfileImporter):
SQUASH_THESE_HTTP_CODES = ['404',]
def getUrlsAndCallbacks(self):
if '@' in self.query:
email_address = self.query
else:
email_address = self.query + '@debian.org'
url = 'http://qa.debian.org/developer.php?' + mysite.base.unicode_sanity.urlencode({
u'login': unicode(email_address)})
return [ {
'url': url,
'errback': self.squashIrrelevantErrors,
'callback': self.handlePageContents } ]
def handlePageContents(self, contents):
'''contents is a string containing the data the web page contained.
'''
file_descriptor_wrapping_contents = StringIO.StringIO(contents)
parsed = lxml.html.parse(file_descriptor_wrapping_contents).getroot()
package_names = self._package_names_from_parsed_document(parsed)
self._create_citations_from_package_names(package_names)
def _package_names_from_parsed_document(self, parsed):
# for each H3 (Like "main" or "non-free" or "Non-maintainer uploads",
# grab that H3 to figure out the heading. These h3s have a table right next
# to them in the DOM.
package_names = []
for relevant_table in parsed.cssselect('h3+table'):
num_added = 0
h3 = relevant_table.getprevious()
table = relevant_table
h3_text = h3.text_content()
# this looks something like "main (5)"
section, number_of_packages = SECTION_NAME_AND_NUMBER_SPLITTER.match(h3_text).groups()
# Trim trailing whitespace
section = section.strip()
# If the section is "Non-maintainer uploads", skip it for now.
# That's because, for now, this importer is interested only in
# what packages the person maintains.
if section == 'Non-maintainer uploads':
continue
for package_bold_name in table.cssselect('tr b'):
package_name = package_bold_name.text_content()
package_description = package_bold_name.cssselect('span')[0].attrib['title']
num_added += 1
package_names.append( (package_name, package_description) )
assert num_added == int(number_of_packages)
return package_names
def _create_citations_from_package_names(self, package_names):
dia = mysite.profile.models.DataImportAttempt.objects.get(id=self.dia_id)
person = dia.person
for package_name, package_description in package_names:
(project, _) = mysite.search.models.Project.objects.get_or_create(name=package_name)
package_link = 'http://packages.debian.org/src:' + urllib.quote(
package_name)
if mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project).count() == 0:
portfolio_entry = mysite.profile.models.PortfolioEntry(person=person,
project=project,
project_description=package_description)
portfolio_entry.save()
portfolio_entry = mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project)[0]
citation = mysite.profile.models.Citation()
citation.languages = "" # FIXME ", ".join(result['languages'])
citation.contributor_role='Maintainer'
citation.portfolio_entry = portfolio_entry
citation.data_import_attempt = dia
citation.url = package_link
citation.save_and_check_for_duplicates()
# And add a citation to the Debian portfolio entry
(project, _) = mysite.search.models.Project.objects.get_or_create(name='Debian GNU/Linux')
if mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project).count() == 0:
portfolio_entry = mysite.profile.models.PortfolioEntry(person=person,
project=project,
project_description=
'The universal operating system')
portfolio_entry.save()
portfolio_entry = mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project)[0]
citation = mysite.profile.models.Citation()
citation.languages = '' # FIXME: ?
citation.contributor_role='Maintainer of %s' % package_name
citation.portfolio_entry = portfolio_entry
citation.data_import_attempt = dia
citation.url = package_link
citation.save_and_check_for_duplicates()
person.last_polled = datetime.datetime.now()
person.save()
class LaunchpadProfilePageScraper(ProfileImporter):
SQUASH_THESE_HTTP_CODES = ['404',]
def getUrlsAndCallbacks(self):
# If the query has an '@' in it, enqueue a task to
# find the username.
if '@' in self.query:
return [self.getUrlAndCallbackForEmailLookup()]
else:
return [self.getUrlAndCallbackForProfilePage()]
def getUrlAndCallbackForEmailLookup(self, query=None):
if query is None:
query = self.query
this_one = {}
this_one['url'] = ('https://api.launchpad.net/1.0/people?' +
'ws.op=find&text=' +
mysite.base.unicode_sanity.quote(
query))
this_one['callback'] = self.parseAndProcessUserSearch
this_one['errback'] = self.squashIrrelevantErrors
return this_one
def getUrlAndCallbackForProfilePage(self, query=None):
if query is None:
query = self.query
# Enqueue a task to actually get the user page
this_one = {}
this_one['url'] = ('https://launchpad.net/~' +
mysite.base.unicode_sanity.quote(query))
this_one['callback'] = self.parseAndProcessProfilePage
this_one['errback'] = self.squashIrrelevantErrors
return this_one
def parseAndProcessProfilePage(self, profile_html):
PROJECT_NAME_FIXUPS = {
'Launchpad itself': 'Launchpad',
'Debian': 'Debian GNU/Linux'}
doc_u = unicode(profile_html, 'utf-8')
tree = lxml.html.document_fromstring(doc_u)
contributions = {}
# Expecting html like this:
# <table class='contributions'>
# <tr>
# ...
# <img title='Bug Management' />
#
# It generates a list of dictionaries like this:
## {
## 'F-Spot': {
## 'url': 'http://launchpad.net/f-spot',
## 'involvement_types': ['Bug Management', 'Bazaar Branches'],
## 'languages' : ['python', 'shell script']
## }
## }
# Extract Launchpad username from page
if not tree.cssselect('#launchpad-id dd'):
return # Well, there's no launchpad ID here, so that's that.
username = tree.cssselect('#launchpad-id dd')[0].text_content().strip()
for row in tree.cssselect('.contributions tr'):
project_link = row.cssselect('a')[0]
project_name = project_link.text_content().strip()
# FIXUPs: Launchpad uses some weird project names:
project_name = PROJECT_NAME_FIXUPS.get(project_name,
project_name)
project_url_relative = project_link.attrib['href']
project_url = urlparse.urljoin('https://launchpad.net/',
project_url_relative)
involvement_types = [
i.attrib.get('title', '').strip()
for i in row.cssselect('img')]
contributions[project_name] = {
'involvement_types': set([k for k in involvement_types if k]),
'url': project_url,
'citation_url': "https://launchpad.net/~" + username,
}
# Now create Citations for those facts
for project_name in contributions:
self._save_parsed_launchpad_data_in_database(
project_name, contributions[project_name])
def _save_parsed_launchpad_data_in_database(self, project_name, result):
dia = self.get_dia()
person = dia.person
for involvement_type in result['involvement_types']:
(project, _) = mysite.search.models.Project.objects.get_or_create(name=project_name)
# This works like a 'get_first_or_create'.
# Sometimes there are more than one existing PortfolioEntry
# with the details in question.
# FIXME: This is untested.
if mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project).count() == 0:
portfolio_entry = mysite.profile.models.PortfolioEntry(person=person, project=project)
portfolio_entry.save()
portfolio_entry = mysite.profile.models.PortfolioEntry.objects.filter(person=person, project=project)[0]
citation = mysite.profile.models.Citation()
citation.contributor_role = involvement_type
citation.portfolio_entry = portfolio_entry
citation.data_import_attempt = dia
citation.url = result['citation_url']
citation.save_and_check_for_duplicates()
def parseAndProcessUserSearch(self, user_search_json):
data = simplejson.loads(user_search_json)
if data['total_size']:
entry = data['entries'][0]
else:
# No matches. How sad.
return
username = entry['name']
# Now enqueue a task to do the real work.
self.command.call_getPage_on_data_dict(self,
self.getUrlAndCallbackForProfilePage(query=username))
class BitbucketImporter(ProfileImporter):
ROOT_URL = 'http://api.bitbucket.org/1.0/'
SQUASH_THESE_HTTP_CODES = ['404',]
def getUrlsAndCallbacks(self):
return [{
'errback': self.squashIrrelevantErrors,
'url': self.url_for_query(self.query),
'callback': self.processUserJson,
}]
def url_for_query(self, query):
url = self.ROOT_URL
url += 'users/%s/' % (mysite.base.unicode_sanity.quote(self.query))
return url
def url_for_project(self, user_name, project_name):
return 'http://bitbucket.org/%s/%s/' % (
mysite.base.unicode_sanity.quote(user_name),
mysite.base.unicode_sanity.quote(project_name))
def processUserJson(self, json_string):
person = self.get_dia().person
json_data = simplejson.loads(json_string)
bitbucket_username = json_data['user']['username']
repositories = json_data['repositories']
### The repositories list contains a sequence of dictionaries.
### The keys are:
# slug: The url slug for the project
# name: The name of the project
# website: The website associated with the project, defined by the user
# followers_count: Number of followers
# description: The project description
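# e.g. a single entry might look like (illustrative values only):
# {'slug': 'my-repo', 'name': 'My Repo', 'website': '', 'followers_count': 3, 'description': 'A demo repository'}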
for repo in repositories:
# The project name and description we pull out of the data
# provided by Bitbucket.
project_name = repo['name']
slug = repo['slug']
description = repo['description']
# Get the corresponding project object, if it exists.
(project, _) = mysite.search.models.Project.objects.get_or_create(
name=repo['slug'])
# Get the most recent PortfolioEntry for this person and that
# project.
#
# If there is no such PortfolioEntry, then set its project
# description to the one provided by Bitbucket.
portfolio_entry, _ = mysite.profile.models.PortfolioEntry.objects.get_or_create(
person=person,
project=project,
defaults={'project_description':
description.rstrip() or project_name})
# Create a Citation that links to the Bitbucket page
citation, _ = mysite.profile.models.Citation.objects.get_or_create(
url = self.url_for_project(bitbucket_username,
slug),
portfolio_entry = portfolio_entry,
defaults = dict(
contributor_role='Contributed to a repository on Bitbucket.',
data_import_attempt = self.get_dia(),
languages=''))
citation.languages = ''
citation.save_and_check_for_duplicates()
class AbstractOhlohAccountImporter(ProfileImporter):
SQUASH_THESE_HTTP_CODES = ['404',]
def convert_ohloh_contributor_fact_to_citation(self, ohloh_contrib_info, project_data):
"""Create a new Citation from a dictionary roughly representing an Ohloh ContributionFact."""
# {{{
# FIXME: Enforce uniqueness on (source, vcs_committer_identifier, project)
# Which is to say, overwrite the previous citation with the same source,
# vcs_committer_identifier and project.
# FIXME: Also store ohloh_contrib_info somewhere so we can parse it later.
# FIXME: Also store the launchpad HttpResponse object somewhere so we can parse it l8r.
# "We'll just pickle the sucker and throw it into a database column. This is going to be
# very exciting. just Hhhomphf." -- Asheesh.
permalink = self.generate_contributor_url(
project_data['url_name'],
int(ohloh_contrib_info['contributor_id']))
self.command.call_getPage_on_data_dict(
self,
{'url': permalink,
'callback': lambda _ignored:
self.convert_ohloh_contributor_fact_and_contributor_url_to_citation(ohloh_contrib_info, project_data, permalink),
'errback': lambda _ignored:
self.convert_ohloh_contributor_fact_and_contributor_url_to_citation(ohloh_contrib_info, project_data, None)})
def convert_ohloh_contributor_fact_and_contributor_url_to_citation(self, ohloh_contrib_info, project_data, contributor_url):
(project, _) = mysite.search.models.Project.objects.get_or_create(
name__iexact=project_data['name'],
defaults={'name': project_data['name']})
(portfolio_entry, _) = mysite.profile.models.PortfolioEntry.objects.get_or_create(
person=self.get_dia().person, project=project)
citation = mysite.profile.models.Citation()
citation.distinct_months = int(ohloh_contrib_info.get('man_months', 0)) or None
citation.languages = project_data.get('primary_language_nice_name', '')
citation.url = contributor_url
citation.data_import_attempt = self.get_dia()
citation.portfolio_entry = portfolio_entry
citation.save_and_check_for_duplicates()
return citation
def url_for_ohloh_query(self, url, params=None, API_KEY=None):
if API_KEY is None:
from django.conf import settings
API_KEY = settings.OHLOH_API_KEY
my_params = {u'api_key': unicode(API_KEY)}
if params:
my_params.update(params)
params = my_params ; del my_params
encoded = mysite.base.unicode_sanity.urlencode(params)
if url[-1] != '?':
url += u'?'
url += encoded
return url
def parse_ohloh_xml(self, xml_string):
try:
s = xml_string
tree = ET.parse(StringIO.StringIO(s))
except xml.parsers.expat.ExpatError:
# well, I'll be. it doesn't parse.
# There's nothing to do.
return None
# Did Ohloh return an error?
root = tree.getroot()
if root.find('error') is not None:
# FIXME: We could log this, but for now, we'll just eat it.
return None # The callback chain is over.
return tree
def xml_tag_to_dict(self, tag):
'''This method turns the input tag into a dictionary of
Unicode strings.
We use this across the Ohloh import code because I feel more
comfortable passing native Python dictionaries around, rather
than thick, heavy XML things.
(That, and dictionaries are easier to use in the test suite.)'''
this = {}
for child in tag.getchildren():
if child.text:
this[unicode(child.tag)] = force_unicode(child.text)
return this
def filter_ohloh_xml(self, root, selector, many=False):
relevant_tag_dicts = []
interestings = root.findall(selector)
for interesting in interestings:
this = self.xml_tag_to_dict(interesting)
# Good, now we have a dictionary version of the XML tag.
if many:
relevant_tag_dicts.append(this)
else:
return this
if many:
return relevant_tag_dicts
def generate_contributor_url(self, project_name, contributor_id):
'''Returns either a nice, deep link into Ohloh for data on the contribution,
or None if such a link could not be made.'''
nice_url = 'https://www.ohloh.net/p/%s/contributors/%d' % (
project_name.lower(), contributor_id)
return nice_url
def filter_out_irrelevant_ohloh_dicts(self, data_dicts):
out = []
for data_dict in data_dicts:
if data_dict.get('contributor_name', '').lower() != self.query.lower():
continue # If we were asked-to, we can skip data dicts that are irrelevant.
out.append(data_dict)
return out
def parse_then_filter_then_interpret_ohloh_xml(self, xml_string, filter_out_based_on_query=True):
tree = self.parse_ohloh_xml(xml_string)
if tree is None:
return
list_of_dicts = self.filter_ohloh_xml(tree, 'result/contributor_fact', many=True)
if not list_of_dicts:
return
if filter_out_based_on_query:
list_of_dicts = self.filter_out_irrelevant_ohloh_dicts(list_of_dicts)
### Now that we have as much information as we do, we pass each dictionary
### individually to a function that creates a Citation.
###
### To do that, it might have to make some more web requests.
for data_dict in list_of_dicts:
self.get_analysis_data_then_convert(data_dict)
def get_analysis_data_then_convert(self, c_f):
url = self.url_for_ohloh_query('http://www.ohloh.net/analyses/%d.xml' % int(c_f['analysis_id']))
callback = lambda xml_string: self.parse_analysis_data_and_get_project_data_then_convert(xml_string, c_f)
errback = self.squashIrrelevantErrors
self.command.call_getPage_on_data_dict(
self,
{'url': url,
'callback': callback,
'errback': errback})
def parse_analysis_data_and_get_project_data_then_convert(self, analysis_data_xml_string, c_f):
# We are interested in grabbing the Ohloh project ID out of the project analysis
# data dump.
tree = self.parse_ohloh_xml(analysis_data_xml_string)
if tree is None:
return
analysis_data = self.filter_ohloh_xml(tree, 'result/analysis', many=False)
# Now we go look for the project data XML blob.
return self.get_project_data_then_continue(int(analysis_data['project_id']), c_f)
def get_project_data_then_continue(self, project_id, c_f):
url = self.url_for_ohloh_query('http://www.ohloh.net/projects/%d.xml' % project_id)
callback = lambda xml_string: self.parse_project_data_then_convert(xml_string, c_f)
errback = self.squashIrrelevantErrors # No error recovery available to us
self.command.call_getPage_on_data_dict(
self,
{'url': url,
'callback': callback,
'errback': errback})
def parse_project_data_then_convert(self, project_data_xml_string, c_f):
tree = self.parse_ohloh_xml(project_data_xml_string)
if tree is None:
return
project_data = self.filter_ohloh_xml(tree, 'result/project', many=False)
if not project_data:
return
self.convert_ohloh_contributor_fact_to_citation(c_f, project_data)
class RepositorySearchOhlohImporter(AbstractOhlohAccountImporter):
BASE_URL = 'http://www.ohloh.net/contributors.xml'
def getUrlsAndCallbacks(self):
url = self.url_for_ohloh_query(url=self.BASE_URL,
params={u'query': self.query})
return [{
'url': url,
'errback': self.squashIrrelevantErrors,
'callback': self.parse_then_filter_then_interpret_ohloh_xml}]
###
class OhlohUsernameImporter(AbstractOhlohAccountImporter):
def getUrlsAndCallbacksForUsername(self, username):
# First, we load download the user's profile page and look for
# (project, contributor_id) pairs.
#
# Then, eventually, we will ask the Ohloh API about each of
# those projects.
#
# It would be nice if there were a way to do this using only
# the Ohloh API, but I don't think there is.
# FIXME: Handle unicode input for username
return [{
'url': ('https://www.ohloh.net/accounts/%s' %
urllib.quote(username)),
'callback': self.process_user_page,
'errback': self.squashIrrelevantErrors}]
def getUrlsAndCallbacks(self):
# First, we load download the user's profile page and look for
# (project, contributor_id) pairs.
#
# Then, eventually, we will ask the Ohloh API about each of
# those projects.
#
# It would be nice if there were a way to do this using only
# the Ohloh API, but I don't think there is.
if '@' in self.query:
# To handle email addresses with Ohloh, all we have to do
# is turn them into their MD5 hashes.
#
# If an account with that username exists, then Ohloh will redirect
# us to the actual account page.
#
# If not, we will probably get a 404.
hasher = hashlib.md5(); hasher.update(self.query)
hashed = hasher.hexdigest()
query = hashed
else:
# If it is not an email address, we can just pass it straight through.
query = self.query
return self.getUrlsAndCallbacksForUsername(query)
def getUrlAndCallbackForProjectAndContributor(self, project_name,
contributor_id):
base_url = 'https://www.ohloh.net/p/%s/contributors/%d.xml' % (
urllib.quote(project_name), contributor_id)
# Since we know that the contributor ID truly does correspond
# to this OpenHatch user, we pass in filter_out_based_on_query=False.
#
# The parse_then_... method typically checks each Ohloh contributor_fact
# to make sure it is relevant. Since we know they are all relevant,
# we can skip that check.
callback = lambda data: self.parse_then_filter_then_interpret_ohloh_xml(data, filter_out_based_on_query=False)
return {'url': self.url_for_ohloh_query(base_url),
'callback': callback,
'errback': self.squashIrrelevantErrors}
def process_user_page(self, html_string):
root = lxml.html.parse(StringIO.StringIO(html_string)).getroot()
relevant_links = root.cssselect('a.position')
relevant_hrefs = [link.attrib['href'] for link in relevant_links if '/contributors/' in link.attrib['href']]
relevant_project_and_contributor_id_pairs = []
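# Each href looks like "/p/<url_name>/contributors/<contributor_id>";
# split it to recover the project url_name and the numeric contributor id.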
for href in relevant_hrefs:
url_name, contributor_id = re.split('[/][a-z]+[/]', href, 1)[1].split('/contributors/')
self.process_url_name_and_contributor_id(
url_name, int(contributor_id))
def process_url_name_and_contributor_id(self, url_name, contributor_id):
# The Ohloh url_name that we get back is not actually enough to get the project information
# we need. So we have to first convert it into an Ohloh project id.
#
# The only way I can see to do that is to load up the project page, and look
# for the badge image URL. It contains the project ID.
#
# FIXME: Maybe if we use the "position" ID (also available through an image, namely the
# activity chart), we skip a scraping step. Who knows.
url = 'https://www.ohloh.net/p/%s' % url_name
callback = lambda page_text: self.get_project_id_then_continue(page_text, contributor_id)
errback = self.squashIrrelevantErrors # There is no going back.
self.command.call_getPage_on_data_dict(
self,
{'url': url,
'callback': callback,
'errback': errback})
def get_project_id_then_continue(self, project_page_text, contributor_id):
# First, extract the project ID from the page text
file_descriptor_wrapping_contents = StringIO.StringIO(project_page_text)
parsed = lxml.html.parse(file_descriptor_wrapping_contents).getroot()
badge_images = parsed.cssselect('.badge_preview img')
if not badge_images:
return # Oh, well.
badge_image_link = badge_images[0].attrib['src']
badge_image_link_parsed = [x for x in badge_image_link.split('/') if x]
project_id = int(badge_image_link_parsed[1])
### We'll need to get the ContributorFact XML blob before anything else goes on
self.command.call_getPage_on_data_dict(
self,
{'url': self.url_for_ohloh_query(
'https://www.ohloh.net/p/%d/contributors/%d.xml' %
(project_id, contributor_id)),
'callback': lambda xml_string: self.parse_contributor_facts_then_continue(xml_string, project_id),
'errback': self.squashIrrelevantErrors})
def parse_contributor_facts_then_continue(self, xml_string, project_id):
tree = self.parse_ohloh_xml(xml_string)
if tree is None:
return
c_f = self.filter_ohloh_xml(tree, 'result/contributor_fact', many=False)
if not c_f:
return
# Now we go look for the project data XML blob.
return self.get_project_data_then_continue(project_id, c_f)
###
SOURCE_TO_CLASS = {
'db': DebianQA,
'bb': BitbucketImporter,
'gh': GithubImporter,
'lp': LaunchpadProfilePageScraper,
'rs': RepositorySearchOhlohImporter,
'oh': OhlohUsernameImporter,
}
|
jledbetter/openhatch
|
mysite/customs/profile_importers.py
|
Python
|
agpl-3.0
| 39,168
|
[
"exciting"
] |
db09f95c0cbf4b42269966e90b804598b469dabdf838cac5b8527eaa3c06779b
|
import threading
import time
import uuid
from common.constants import gameModes, actions
from common.log import logUtils as log
from common.ripple import userUtils
from constants import exceptions
from constants import serverPackets
from events import logoutEvent
from helpers import chatHelper as chat
from objects import glob
class token:
def __init__(self, userID, token_ = None, ip ="", irc = False, timeOffset = 0, tournament = False):
"""
Create a token object and set userID and token
:param userID: user associated to this token
:param token_: if passed, set token to that value
if not passed, token will be generated
:param ip: client ip. optional.
:param irc: if True, set this token as IRC client. Default: False.
:param timeOffset: the time offset from UTC for this user. Default: 0.
:param tournament: if True, flag this client as a tournament client. Default: False.
"""
# Set stuff
self.userID = userID
self.username = userUtils.getUsername(self.userID)
self.safeUsername = userUtils.getSafeUsername(self.userID)
self.privileges = userUtils.getPrivileges(self.userID)
self.admin = userUtils.isInPrivilegeGroup(self.userID, "developer")\
or userUtils.isInPrivilegeGroup(self.userID, "community manager")\
or userUtils.isInPrivilegeGroup(self.userID, "chat mod")
self.irc = irc
self.kicked = False
self.restricted = userUtils.isRestricted(self.userID)
self.loginTime = int(time.time())
self.pingTime = self.loginTime
self.timeOffset = timeOffset
self.streams = []
self.tournament = tournament
self.messagesBuffer = []
# Default variables
self.spectators = []
# TODO: Move those two vars to a class
self.spectating = None
self.spectatingUserID = 0 # we need this in case the host gets DCed
self.location = [0,0]
self.joinedChannels = []
self.ip = ip
self.country = 0
self.location = [0,0]
self.awayMessage = ""
self.sentAway = []
self.matchID = -1
self.tillerino = [0,0,-1.0] # beatmap, mods, acc
self.silenceEndTime = 0
self.queue = bytes()
# Spam protection
self.spamRate = 0
# Stats cache
self.actionID = actions.IDLE
self.actionText = ""
self.actionMd5 = ""
self.actionMods = 0
self.gameMode = gameModes.STD
self.beatmapID = 0
self.rankedScore = 0
self.accuracy = 0.0
self.playcount = 0
self.totalScore = 0
self.gameRank = 0
self.pp = 0
# Generate/set token
if token_ is not None:
self.token = token_
else:
self.token = str(uuid.uuid4())
# Locks
self.processingLock = threading.Lock() # Acquired while there's an incoming packet from this user
self._bufferLock = threading.Lock() # Acquired while writing to packets buffer
self._spectLock = threading.RLock()
# Set stats
self.updateCachedStats()
# If we have a valid ip, save bancho session in DB so we can cache LETS logins
if ip != "":
userUtils.saveBanchoSession(self.userID, self.ip)
# Join main stream
self.joinStream("main")
def enqueue(self, bytes_):
"""
Add bytes (packets) to queue
:param bytes_: (packet) bytes to enqueue
"""
try:
# Acquire the buffer lock
self._bufferLock.acquire()
# Never enqueue for IRC clients or Foka
if self.irc or self.userID < 999:
return
# Avoid memory leaks
if len(bytes_) < 10 * 10 ** 6:
self.queue += bytes_
else:
log.warning("{}'s packets buffer is above 10M!! Lost some data!".format(self.username))
finally:
# Release the buffer lock
self._bufferLock.release()
def resetQueue(self):
"""Resets the queue. Call when enqueued packets have been sent"""
try:
self._bufferLock.acquire()
self.queue = bytes()
finally:
self._bufferLock.release()
def joinChannel(self, channelObject):
"""
Join a channel
:param channelObject: channel object
:raises: exceptions.userAlreadyInChannelException()
exceptions.channelNoPermissionsException()
"""
if channelObject.name in self.joinedChannels:
raise exceptions.userAlreadyInChannelException()
if not channelObject.publicRead and not self.admin:
raise exceptions.channelNoPermissionsException()
self.joinedChannels.append(channelObject.name)
self.joinStream("chat/{}".format(channelObject.name))
self.enqueue(serverPackets.channelJoinSuccess(self.userID, channelObject.clientName))
def partChannel(self, channelObject):
"""
Remove channel from joined channels list
:param channelObject: channel object
"""
self.joinedChannels.remove(channelObject.name)
self.leaveStream("chat/{}".format(channelObject.name))
def setLocation(self, latitude, longitude):
"""
Set client location
:param latitude: latitude
:param longitude: longitude
"""
self.location = (latitude, longitude)
def getLatitude(self):
"""
Get latitude
:return: latitude
"""
return self.location[0]
def getLongitude(self):
"""
Get longitude
:return: longitude
"""
return self.location[1]
def startSpectating(self, host):
"""
Set the spectating user to userID, join spectator stream and chat channel
and send required packets to host
:param host: host osuToken object
"""
try:
self._spectLock.acquire()
# Stop spectating old client
self.stopSpectating()
# Set new spectator host
self.spectating = host.token
self.spectatingUserID = host.userID
# Add us to host's spectator list
host.spectators.append(self.token)
# Create and join spectator stream
streamName = "spect/{}".format(host.userID)
glob.streams.add(streamName)
self.joinStream(streamName)
host.joinStream(streamName)
# Send spectator join packet to host
host.enqueue(serverPackets.addSpectator(self.userID))
# Create and join #spectator (#spect_userid) channel
glob.channels.addTempChannel("#spect_{}".format(host.userID))
chat.joinChannel(token=self, channel="#spect_{}".format(host.userID), force=True)
if len(host.spectators) == 1:
# First spectator, send #spectator join to host too
chat.joinChannel(token=host, channel="#spect_{}".format(host.userID), force=True)
# Send fellow spectator join to all clients
glob.streams.broadcast(streamName, serverPackets.fellowSpectatorJoined(self.userID))
# Get current spectators list
for i in host.spectators:
if i != self.token and i in glob.tokens.tokens:
self.enqueue(serverPackets.fellowSpectatorJoined(glob.tokens.tokens[i].userID))
# Log
log.info("{} is spectating {}".format(self.username, host.username))
finally:
self._spectLock.release()
def stopSpectating(self):
"""
Stop spectating, leave spectator stream and channel
and send required packets to host
:return:
"""
try:
self._spectLock.acquire()
# Remove our userID from host's spectators
if self.spectating is None or self.spectatingUserID <= 0:
return
if self.spectating in glob.tokens.tokens:
hostToken = glob.tokens.tokens[self.spectating]
else:
hostToken = None
streamName = "spect/{}".format(self.spectatingUserID)
# Remove us from host's spectators list,
# leave spectator stream
# and send the spectator left packet to host
self.leaveStream(streamName)
if hostToken is not None:
hostToken.spectators.remove(self.token)
hostToken.enqueue(serverPackets.removeSpectator(self.userID))
# and to all other spectators
for i in hostToken.spectators:
if i in glob.tokens.tokens:
glob.tokens.tokens[i].enqueue(serverPackets.fellowSpectatorLeft(self.userID))
# If nobody is spectating the host anymore, close #spectator channel
# and remove host from spect stream too
if len(hostToken.spectators) == 0:
chat.partChannel(token=hostToken, channel="#spect_{}".format(hostToken.userID), kick=True, force=True)
hostToken.leaveStream(streamName)
# Console output
log.info("{} is no longer spectating {}. Current spectators: {}".format(self.username, self.spectatingUserID, hostToken.spectators))
# Part #spectator channel
chat.partChannel(token=self, channel="#spect_{}".format(self.spectatingUserID), kick=True, force=True)
# Set our spectating user to 0
self.spectating = None
self.spectatingUserID = 0
finally:
self._spectLock.release()
def updatePingTime(self):
"""
Update latest ping time to current time
:return:
"""
self.pingTime = int(time.time())
def joinMatch(self, matchID):
"""
Set match to matchID, join match stream and channel
:param matchID: new match ID
:return:
"""
# Make sure the match exists
if matchID not in glob.matches.matches:
return
# Match exists, get object
match = glob.matches.matches[matchID]
# Stop spectating
self.stopSpectating()
# Leave other matches
if self.matchID > -1 and self.matchID != matchID:
self.leaveMatch()
# Try to join match
joined = match.userJoin(self)
if not joined:
self.enqueue(serverPackets.matchJoinFail())
return
# Set matchID, join stream, channel and send packet
self.matchID = matchID
self.joinStream(match.streamName)
chat.joinChannel(token=self, channel="#multi_{}".format(self.matchID), force=True)
self.enqueue(serverPackets.matchJoinSuccess(matchID))
if match.isTourney:
# Alert the user if we have just joined a tourney match
self.enqueue(serverPackets.notification("You are now in a tournament match."))
# If a user joins, then the ready status of the match changes and
# maybe not all users are ready.
match.sendReadyStatus()
def leaveMatch(self):
"""
Leave joined match, match stream and match channel
:return:
"""
# Make sure we are in a match
if self.matchID == -1:
return
# Part #multiplayer channel and streams (/ and /playing)
chat.partChannel(token=self, channel="#multi_{}".format(self.matchID), kick=True, force=True)
self.leaveStream("multi/{}".format(self.matchID))
self.leaveStream("multi/{}/playing".format(self.matchID)) # optional
# Set usertoken match to -1
leavingMatchID = self.matchID
self.matchID = -1
# Make sure the match exists
if leavingMatchID not in glob.matches.matches:
return
# The match exists, get object
match = glob.matches.matches[leavingMatchID]
# Set slot to free
match.userLeft(self)
if match.isTourney:
# If a user leaves, then the ready status of the match changes and
# maybe all users are ready. Or maybe nobody is in the match anymore
match.sendReadyStatus()
def kick(self, message="You have been kicked from the server. Please login again.", reason="kick"):
"""
Kick this user from the server
:param message: Notification message to send to this user.
Default: "You have been kicked from the server. Please login again."
:param reason: Kick reason, used in logs. Default: "kick"
:return:
"""
# Send packet to target
log.info("{} has been disconnected. ({})".format(self.username, reason))
if message != "":
self.enqueue(serverPackets.notification(message))
self.enqueue(serverPackets.loginFailed())
# Logout event
logoutEvent.handle(self, deleteToken=self.irc)
def silence(self, seconds = None, reason = "", author = 999):
"""
Silences this user (db, packet and token)
:param seconds: silence length in seconds. If None, get it from db. Default: None
:param reason: silence reason. Default: empty string
:param author: userID of who has silenced the user. Default: 999 (FokaBot)
:return:
"""
if seconds is None:
# Get silence expire from db if needed
seconds = max(0, userUtils.getSilenceEnd(self.userID) - int(time.time()))
else:
# Silence in db and token
userUtils.silence(self.userID, seconds, reason, author)
# Silence token
self.silenceEndTime = int(time.time()) + seconds
# Send silence packet to user
self.enqueue(serverPackets.silenceEndTime(seconds))
# Send silenced packet to everyone else
glob.streams.broadcast("main", serverPackets.userSilenced(self.userID))
def spamProtection(self, increaseSpamRate = True):
"""
Silences the user if they are spamming.
:param increaseSpamRate: set to True if the user has sent a new message. Default: True
:return:
"""
# Increase the spam rate if needed
if increaseSpamRate:
self.spamRate += 1
# Silence the user if needed
if self.spamRate > 10:
self.silence(1800, "Spamming (auto spam protection)")
def isSilenced(self):
"""
Returns True if this user is silenced, otherwise False
:return: True if this user is silenced, otherwise False
"""
return self.silenceEndTime-int(time.time()) > 0
def getSilenceSecondsLeft(self):
"""
Returns the seconds left for this user's silence
(0 if user is not silenced)
:return: silence seconds left (or 0)
"""
return max(0, self.silenceEndTime-int(time.time()))
def updateCachedStats(self):
"""
Update all cached stats for this token
:return:
"""
stats = userUtils.getUserStats(self.userID, self.gameMode)
log.debug(str(stats))
if stats is None:
log.warning("Stats query returned None")
return
self.rankedScore = stats["rankedScore"]
self.accuracy = stats["accuracy"]/100
self.playcount = stats["playcount"]
self.totalScore = stats["totalScore"]
self.gameRank = stats["gameRank"]
self.pp = stats["pp"]
def checkRestricted(self):
"""
Check if this token is restricted. If so, send fokabot message
:return:
"""
oldRestricted = self.restricted
self.restricted = userUtils.isRestricted(self.userID)
if self.restricted:
self.setRestricted()
elif not self.restricted and oldRestricted != self.restricted:
self.resetRestricted()
def checkBanned(self):
"""
Check if this user is banned. If so, disconnect it.
:return:
"""
if userUtils.isBanned(self.userID):
self.enqueue(serverPackets.loginBanned())
logoutEvent.handle(self, deleteToken=False)
def setRestricted(self):
"""
Set this token as restricted, send FokaBot message to user
and send offline packet to everyone
:return:
"""
self.restricted = True
chat.sendMessage("FokaBot", self.username, "Your account is currently in restricted mode. Please visit ripple's website for more information.")
def resetRestricted(self):
"""
Send FokaBot message to alert the user that they have been unrestricted
and have to log in again.
:return:
"""
chat.sendMessage("FokaBot", self.username, "Your account has been unrestricted! Please log in again.")
def joinStream(self, name):
"""
Join a packet stream, or create it if the stream doesn't exist.
:param name: stream name
:return:
"""
glob.streams.join(name, token=self.token)
if name not in self.streams:
self.streams.append(name)
def leaveStream(self, name):
"""
Leave a packet stream
:param name: stream name
:return:
"""
glob.streams.leave(name, token=self.token)
if name in self.streams:
self.streams.remove(name)
def leaveAllStreams(self):
"""
Leave all joined packet streams
:return:
"""
for i in self.streams:
self.leaveStream(i)
def awayCheck(self, userID):
"""
Returns True if userID doesn't know that we are away
Returns False if we are not away or if userID already knows we are away
:param userID: original sender userID
:return:
"""
if self.awayMessage == "" or userID in self.sentAway:
return False
self.sentAway.append(userID)
return True
def addMessageInBuffer(self, chan, message):
"""
Add a message to the messages buffer (10 messages, truncated at 50 chars).
Used as proof when the user gets reported.
:param chan: channel
:param message: message content
:return:
"""
if len(self.messagesBuffer) > 9:
self.messagesBuffer = self.messagesBuffer[1:]
self.messagesBuffer.append("{time} - {user}@{channel}: {message}".format(time=time.strftime("%H:%M", time.localtime()), user=self.username, channel=chan, message=message[:50]))
def getMessagesBufferString(self):
"""
Get the content of the messages buffer as a string
:return: messages buffer content as a string
"""
return "\n".join(x for x in self.messagesBuffer)
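# --- Illustrative sketch (not part of the original osuToken module) ---------
# enqueue()/resetQueue() above implement a lock-guarded byte buffer with a
# per-chunk size cap. A minimal standalone version of the same pattern,
# assuming the same 10 MB limit, could look like this:
import threading

class _PacketBufferSketch:
    def __init__(self, limit=10 * 10 ** 6):
        self._lock = threading.Lock()
        self._queue = bytes()
        self._limit = limit

    def enqueue(self, data):
        # Mirrors osuToken.enqueue: the cap is checked against the incoming
        # chunk, not against the total buffer size
        with self._lock:
            if len(data) < self._limit:
                self._queue += data

    def flush(self):
        # Equivalent to reading the queue and then calling resetQueue()
        with self._lock:
            data, self._queue = self._queue, bytes()
            return data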
|
osuripple/pep.py
|
objects/osuToken.py
|
Python
|
agpl-3.0
| 15,993
|
[
"VisIt"
] |
2714760e1cbb10ee30c0dc3c2638a366dd3406bfda58d282c6daf1de42818f50
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
# Test the selection exporters in MDAnalysis.selections
from __future__ import absolute_import
# use StringIO and NamedStream to write to memory instead to temp files
from six.moves import cPickle, StringIO
import re
import numpy as np
from numpy.testing import TestCase, assert_equal, assert_array_equal, dec
from nose.plugins.attrib import attr
from MDAnalysisTests.plugins.knownfailure import knownfailure
from MDAnalysis.tests.datafiles import PSF, DCD
from MDAnalysisTests import parser_not_found
import MDAnalysis
from MDAnalysis.lib.util import NamedStream
class _SelectionWriter(TestCase):
filename = None
max_number = 357 # to keep fixtures smallish, only select CAs up to number 357
@dec.skipif(parser_not_found('DCD'),
'DCD parser not available. Are you using python 3?')
def setUp(self):
self.universe = MDAnalysis.Universe(PSF, DCD)
stream = StringIO()
self.namedfile = NamedStream(stream, self.filename)
def tearDown(self):
del self.universe
del self.namedfile
def _selection(self):
return self.universe.select_atoms("protein and name CA and bynum 1-{0}".format(self.max_number))
def _write(self, **kwargs):
g = self._selection()
g.write(self.namedfile, **kwargs)
return g
def _write_selection(self, **kwargs):
g = self._selection()
g.write(self.namedfile, **kwargs)
return g
def _write_with(self, **kwargs):
g = self._selection()
with self.writer(self.namedfile, **kwargs) as outfile:
outfile.write(g)
return g
def test_write_bad_mode(self):
with self.assertRaises(ValueError):
self._write(name=self.ref_name, mode='a+')
def test_write(self):
self._write(name=self.ref_name)
self._assert_selectionstring()
def test_writeselection(self):
self._write_selection(name=self.ref_name)
self._assert_selectionstring()
def test_write_with(self):
self._write_with(name=self.ref_name)
self._assert_selectionstring()
def ndx2array(lines):
"""Convert Gromacs NDX text file lines to integer array"""
return np.array(" ".join(lines).replace("\n", "").split(), dtype=int)
def lines2one(lines):
"""Join lines and squash all whitespace"""
return " ".join(" ".join(lines).split())
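# Illustrative examples (not part of the test suite): these two helpers
# normalise reference data before comparison, e.g.
#   ndx2array(['5 22 46\n', '65 84\n'])  ->  array([ 5, 22, 46, 65, 84])
#   lines2one(['a  b\n', ' c  d '])      ->  'a b c d'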
class TestSelectionWriter_Gromacs(_SelectionWriter):
writer = MDAnalysis.selections.gromacs.SelectionWriter
filename = "CA.ndx"
ref_name = "CA_selection"
ref_indices = ndx2array(
[ '5 22 46 65 84 103 122 129 141 153 160 170 \n',
'177 199 206 220 237 247 264 284 303 320 335 357 \n',
]
)
def _assert_selectionstring(self):
header = self.namedfile.readline().strip()
assert_equal(header, "[ {0} ]".format(self.ref_name),
err_msg="NDX file has wrong selection name")
indices = ndx2array(self.namedfile.readlines())
assert_array_equal(indices, self.ref_indices,
err_msg="indices were not written correctly")
class TestSelectionWriter_Charmm(_SelectionWriter):
writer = MDAnalysis.selections.charmm.SelectionWriter
filename = "CA.str"
ref_name = "CA_selection"
ref_selectionstring = lines2one([
"""! MDAnalysis CHARMM selection
DEFINE CA_selection SELECT -
BYNUM 5 .or. BYNUM 22 .or. BYNUM 46 .or. BYNUM 65 .or. -
BYNUM 84 .or. BYNUM 103 .or. BYNUM 122 .or. BYNUM 129 .or. -
BYNUM 141 .or. BYNUM 153 .or. BYNUM 160 .or. BYNUM 170 .or. -
BYNUM 177 .or. BYNUM 199 .or. BYNUM 206 .or. BYNUM 220 .or. -
BYNUM 237 .or. BYNUM 247 .or. BYNUM 264 .or. BYNUM 284 .or. -
BYNUM 303 .or. BYNUM 320 .or. BYNUM 335 .or. BYNUM 357 END
"""])
def _assert_selectionstring(self):
selectionstring = lines2one(self.namedfile.readlines())
assert_equal(selectionstring, self.ref_selectionstring,
err_msg="Charmm selection was not written correctly")
class TestSelectionWriter_PyMOL(_SelectionWriter):
writer = MDAnalysis.selections.pymol.SelectionWriter
filename = "CA.pml"
ref_name = "CA_selection"
ref_selectionstring = lines2one([
"""# MDAnalysis PyMol selection\n select CA_selection, \\
index 5 | index 22 | index 46 | index 65 | index 84 | index 103 | \\
index 122 | index 129 | index 141 | index 153 | index 160 | index 170 | \\
index 177 | index 199 | index 206 | index 220 | index 237 | index 247 | \\
index 264 | index 284 | index 303 | index 320 | index 335 | index 357
"""])
def _assert_selectionstring(self):
selectionstring = lines2one(self.namedfile.readlines())
assert_equal(selectionstring, self.ref_selectionstring,
err_msg="PyMOL selection was not written correctly")
class TestSelectionWriter_VMD(_SelectionWriter):
writer = MDAnalysis.selections.vmd.SelectionWriter
filename = "CA.vmd"
ref_name = "CA_selection"
ref_selectionstring = lines2one([
"""# MDAnalysis VMD selection atomselect macro CA_selection {index 4 21 45 64 83 102 121 128 \\
140 152 159 169 176 198 205 219 \\
236 246 263 283 302 319 334 356 }
"""])
def _assert_selectionstring(self):
selectionstring = lines2one(self.namedfile.readlines())
assert_equal(selectionstring, self.ref_selectionstring,
err_msg="VMD selection was not written correctly")
def spt2array(line):
"""Get name of and convert Jmol SPT definition to integer array"""
match = re.search(r'\@~(\w+) \(\{([\d\s]*)\}\)', line)
return match.group(1), np.array(match.group(2).split(), dtype=int)
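# Illustrative example (not part of the test suite): spt2array() splits a Jmol
# SPT macro definition into its name and index array, e.g.
#   spt2array('@~ca ({4 21 45});')  ->  ('ca', array([ 4, 21, 45]))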
class TestSelectionWriter_Jmol(_SelectionWriter):
writer = MDAnalysis.selections.jmol.SelectionWriter
filename = "CA.spt"
ref_name, ref_indices = spt2array(
( '@~ca ({4 21 45 64 83 102 121 128 140 152 159 169 176 198 205 219 236'
' 246 263 283 302 319 334 356});')
)
def _assert_selectionstring(self):
header, indices = spt2array(self.namedfile.readline())
assert_equal(header, self.ref_name,
err_msg="SPT file has wrong selection name")
assert_array_equal(indices, self.ref_indices,
err_msg="SPT indices were not written correctly")
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/test_selections.py
|
Python
|
gpl-2.0
| 7,518
|
[
"CHARMM",
"Gromacs",
"Jmol",
"MDAnalysis",
"PyMOL",
"VMD"
] |
1266fd4d7cff3ac4c99b256149cd12cd7cd2cdde8066151e88c30f92a4aeb1f4
|
from configparser import ConfigParser
import mpi4py
import numpy
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
lammps_config = ConfigParser()
lammps_config.read('lammps.cfg')
include_dirs = [
mpi4py.get_include(),
numpy.get_include(),
lammps_config.get('lammps', 'lammps_include_dir'),
lammps_config.get('mpi', 'mpi_include_dir')
]
# TODO: Should maybe include mpi_cxx, mpi, python3.4m
libraries = [lammps_config.get('lammps', 'lammps_library'),
lammps_config.get('mpi', 'mpi_library')]
library_dirs = [lammps_config.get('lammps', 'lammps_library_dir')]
extensions = [
Extension(
'lammps.core',
sources=['lammps/core.pyx'],
include_dirs=include_dirs,
libraries=libraries,
library_dirs=library_dirs,
language='c++'
)
]
setup(
name='lammps',
version='0.1.0',
packages=find_packages(),
package_data={
'lammps': ['data/*.in']
},
description='Pythonic Wrapper to LAMMPS',
long_description='Pythonic Wrapper to LAMMPS (LONG)',
author='Christopher Ostrouchov',
author_email='[email protected]',
url='https://github.com/costrouc/lammps-python',
download_url='https://github.com/costrouc/lammps-python/tarball/master',
keywords=['lammps', 'molecular dynamics', 'cython', 'wrapper'],
ext_modules=cythonize(extensions),
scripts=['scripts/pylammps'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
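# Illustrative lammps.cfg contents (a sketch only; paths are placeholders that
# depend on the local LAMMPS/MPI installation). The ConfigParser calls above
# expect two sections with exactly these keys:
#
#   [lammps]
#   lammps_include_dir = /usr/local/include/lammps
#   lammps_library_dir = /usr/local/lib
#   lammps_library = lammps
#
#   [mpi]
#   mpi_include_dir = /usr/include/mpi
#   mpi_library = mpi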
|
costrouc/lammps-python
|
setup.py
|
Python
|
gpl-3.0
| 1,558
|
[
"LAMMPS"
] |
e285e6718b1ca85572f9e0386d4a4de1dbe086bb063475772aa5b634b5db3f4b
|
#!/usr/bin/env python
# File: plot_icd_mass.py
# Created on: Tue Jun 4 11:31:32 2013
# Last Change: Mon Jul 15 16:35:14 2013
# Purpose of script: <+INSERT+>
# Author: Steven Boada
import pylab as pyl
import cPickle as pickle
def plot_icd_vs_mass():
galaxies = pickle.load(open('galaxies.pickle','rb'))
#galaxies = filter(lambda galaxy:
# galaxy.ston_I > 30. and galaxy.ICD_IH != None, galaxies)
# Make figure
f1 = pyl.figure(1, figsize=(6,4))
f1s1 = f1.add_subplot(121)
f1s2 = f1.add_subplot(122)
#Upper and Lower limit arrow verts
arrowup_verts = [[0.,0.], [-1., -1], [0.,0.],
[0.,-2.], [0.,0.], [1,-1]]
arrowdown_verts = [[0.,0.], [-1., 1], [0.,0.],
[0.,2.], [0.,0.], [1, 1]]
for galaxy in galaxies:
if galaxy.ston_I > 30. and galaxy.ICD_IH != None:
# Add arrows first
if galaxy.ICD_IH > 0.65:
f1s1.scatter(galaxy.Mass, 0.5*100, s=100, marker=None,
verts=arrowup_verts)
elif galaxy.ICD_IH < -0.05:
f1s1.scatter(galaxy.Mass, -0.05*100, s=100, marker=None,
verts=arrowdown_verts)
else:
f1s1.scatter(galaxy.Mass, galaxy.ICD_IH * 100, c='lime',
marker='o', zorder=2, s=50)
if galaxy.ston_J > 30. and galaxy.ICD_JH != None:
# Add arrows first
if galaxy.ICD_JH > 0.5:
f1s2.scatter(galaxy.Mass, 0.2*100, s=100, marker=None,
verts=arrowup_verts)
elif galaxy.ICD_JH < -0.05:
f1s2.scatter(galaxy.Mass, -0.05*100, s=100, marker=None,
verts=arrowdown_verts)
else:
f1s2.scatter(galaxy.Mass, galaxy.ICD_JH * 100, c='lime',
marker='o', zorder=2, s=50)
# Finish Plot
f1s1.axvspan(7.477, 9, facecolor='#FFFDD0', ec='None', zorder=0)
f1s1.axvspan(11, 12, facecolor='#FFFDD0', ec='None', zorder=0)
f1s2.axvspan(7.477, 9, facecolor='#FFFDD0', ec='None', zorder=0)
f1s2.axvspan(11, 12, facecolor='#FFFDD0', ec='None', zorder=0)
f1s1.set_xlim(8,12)
f1s2.set_xlim(8,12)
f1s1.set_xticks([8,9,10,11,12])
f1s2.set_xticks([8,9,10,11,12])
f1s1.axhline(0.0, lw=2, c='b', zorder=0)
f1s2.axhline(0.0, lw=2, c='b', zorder=0)
f1s1.set_xlabel(r"Log Mass ($M_{\odot})$")
f1s1.set_ylabel(r"$\xi[i_{775},H_{160}]$ (%)")
f1s2.set_xlabel(r"Log Mass ($M_{\odot})$")
f1s2.set_ylabel(r"$\xi[J_{125},H_{160}]$ (%)")
pyl.show()
if __name__=='__main__':
plot_icd_vs_mass()
|
boada/ICD
|
sandbox/legacy_plot_code/plot_icd_mass.py
|
Python
|
mit
| 2,607
|
[
"Galaxy"
] |
1e699d79d7373361e4a5f32c3cd504089c88b7ee0b2c0fe824d11a0ebaf1bcc8
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
import os
import transaction
import Globals
from Products.ZenUtils.ZCmdBase import ZCmdBase
from Products.ZenModel.Report import Report
class ReportLoader(ZCmdBase):
def buildOptions(self):
ZCmdBase.buildOptions(self)
self.parser.add_option('-f', '--force', dest='force',
action='store_true', default=0,
help="Force load all the reports")
def loadDatabase(self):
repdir = os.path.join(os.path.dirname(__file__),"reports")
self.loadDirectory(repdir)
transaction.commit()
def reports(self, directory):
def normalize(f):
return f.replace("_", " ")
def toOrg(path):
path = normalize(path).split("/")
path = path[path.index("reports") + 1:]
return "/" + "/".join(path)
return [(toOrg(p), normalize(f[:-4]), os.path.join(p, f))
for p, ds, fs in os.walk(directory)
for f in fs
if f.endswith(".rpt")]
def unloadDirectory(self, repdir):
self.log.info("removing reports from:%s", repdir)
reproot = self.dmd.Reports
for orgpath, fid, fullname in self.reports(repdir):
rorg = reproot.createOrganizer(orgpath)
if getattr(rorg, fid, False):
rorg._delObject(fid)
while rorg.id != 'Reports':
if not rorg.objectValues():
id = rorg.id
rorg = rorg.getPrimaryParent()
rorg._delObject(id)
def loadDirectory(self, repdir):
self.log.info("loading reports from:%s", repdir)
reproot = self.dmd.Reports
for orgpath, fid, fullname in self.reports(repdir):
rorg = reproot.createOrganizer(orgpath)
if getattr(rorg, fid, False):
if self.options.force:
rorg._delObject(fid)
else:
continue
self.log.info("loading: %s/%s", orgpath, fid)
self.loadFile(rorg, fid, fullname)
def loadFile(self, root, id, fullname):
fdata = file(fullname).read()
rpt = Report(id, text=fdata)
root._setObject(id, rpt)
return rpt
if __name__ == "__main__":
rl = ReportLoader()
rl.loadDatabase()
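# Illustrative layout (directory and file names below are made up): reports()
# walks the "reports" directory next to this module for *.rpt files and maps
# underscores to spaces, so a file such as
#   reports/Device_Reports/All_Devices.rpt
# is loaded into the organizer "/Device Reports" as the report "All Devices".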
|
jcurry/ZenPacks.skills1st.deviceReports
|
ZenPacks/skills1st/deviceReports/ReportLoader.py
|
Python
|
gpl-2.0
| 2,863
|
[
"VisIt"
] |
953540c979ae5aee3bdb3dcdb676bed0387c6ba8f0985523d4c96db4286cd8be
|
""" Codes for Scattering by ISM
Based on formalism derived by Macquart & Koay 2013, ApJ, 776, 125
Modified by Prochaska & Neeleman 2017
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from scipy.special import gamma
from astropy import constants
from astropy import units
from astropy.cosmology import Planck15
from IPython import embed
# Constants
const_re = constants.alpha**2 * constants.a0
def theta_mist(n_e, nu_obs, L=50*units.kpc, R=1*units.pc, fV=1.):
"""
Estimate the scattering angle for a mist, following the
calculations by M. McQuinn presented in Prochaska+2019
Args:
n_e (Quantity):
Electron density
nu_obs (Quantity):
Frequency of the radiation, observed
L (Quantity, optional):
Size of the region of the mist
R (Quantity, optional):
Size of the clouds
fV (float, optional):
Filling factor of the bubbles
Returns:
Quantity: Angle in radians
"""
# Constants
me = 9.10938e-28 #(*electron mass: gr *)
estat = 4.8027e-10 # (*charge of electron in statcoulombs *)
phi0 = 2.2 # Numerical estimation
theta = 2 * np.pi * estat**2 * n_e.to('cm**-3').value * phi0 / (
me*(2*np.pi)**2 * 1e18 * nu_obs.to('GHz').value**2) * np.sqrt(2*L*fV/R) * units.radian
# Return
return theta
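# Example (illustrative only): theta_mist() takes astropy Quantities, e.g.
#   theta_mist(1e-3 * units.cm**-3, 1.3 * units.GHz, L=50*units.kpc, R=1*units.pc)
# returns the scattering angle as a Quantity in radians, which can then be
# converted with .to('arcsec').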
def tau_mist(n_e, nu_obs, z_FRB, zL, L=50*units.kpc, R=1*units.pc, fV=1., cosmo=None):
"""
Temporal broadening for a mist of spherical clouds following the
calculations by M. McQuinn presented in Prochaska+2019
Args:
n_e (Quantity):
Electron density
nu_obs (Quantity):
Frequency of the radiation, observed
z_FRB (float):
Redshift of the FRB
zL (float):
Redshift of the intervening lens
L (Quantity, optional):
Size of the region of the mist
R (Quantity, optional):
Size of the clouds
fV (float, optional):
Filling factor of the bubbles
cosmo (Cosmology, optional):
Returns:
Quantity: temporal broadening in seconds
"""
if cosmo is None:
cosmo = Planck15
D_S = cosmo.angular_diameter_distance(z_FRB)
D_L = cosmo.angular_diameter_distance(zL)
D_LS = cosmo.angular_diameter_distance_z1z2(zL, z_FRB)
tau = theta_mist(n_e, nu_obs, L=L, R=R, fV=fV).to('radian').value**2 * (1+zL) * (D_LS*D_L/D_S/2/constants.c)
return tau.to('s')
def ne_from_tau_mist(tau_scatt, z_FRB, zL, nu_obs, L=50*units.kpc, R=1*units.pc,
fV=1., cosmo=None, verbose=False):
"""
n_e from temporal broadening for a mist of spherical clouds following the
calculations by M. McQuinn presented in Prochaska+2019
Args:
tau_scatt (Quantity):
Observed width of the pulse
z_FRB (float):
Redshift of the FRB
zL (float):
Redshift of the intervening lens
nu_obs (Quantity):
Observed freqency
L (Quantity, optional):
Size of the region of the mist
R (Quantity, optional):
Size of the clouds
fV (float, optional):
Filling factor of the bubbles
cosmo (Cosmology, optional):
verbose (bool, optional):
Returns:
Quantity: density in cm**-3
"""
if cosmo is None:
cosmo = Planck15
D_S = cosmo.angular_diameter_distance(z_FRB)
D_L = cosmo.angular_diameter_distance(zL)
D_LS = cosmo.angular_diameter_distance_z1z2(zL, z_FRB)
# Scale -- It is more correct to use the distances we used in our paper as reference (although result is very similar)
D_term = (1.05*.262)/1.23*1000*units.Mpc
nu_181112 = 1.3 * units.GHz
nu_scale = (nu_obs / nu_181112)**2
z_scale = ((1+zL)/(1+0.36738))
cosmo_scale = ((D_L*D_LS/D_S)/D_term)
# Branch
Rlim = 0.011*units.pc * np.sqrt(tau_scatt/(40*units.microsecond)*cosmo_scale)/z_scale
if R < Rlim: #0.011*units.pc * np.sqrt(tau_scatt/(40*units.microsecond)):
if verbose:
print("In R<{} limit".format(Rlim.to('pc')))
n_e = (0.1*units.cm**-3) * 0.568 * np.sqrt(tau_scatt/(40*units.microsecond)) / np.sqrt(
(L/50/units.kpc) * (fV/1e-3) * (0.1*units.pc/R))
# Scalings
z_scale = z_scale**(3/2)
cosmo_scale = cosmo_scale**(-1/2)
else:
if verbose:
print("In R>{} limit".format(Rlim.to('pc')))
n_e = (0.1*units.cm**-3) * 5.03 / np.sqrt((L/50/units.kpc) * (fV/1e-3)) * (
R/(0.1*units.pc))**(3/2)
# Scalings
z_scale = z_scale**2
cosmo_scale = cosmo_scale**(-1)
# Return
return n_e.to('cm**-3') * cosmo_scale * nu_scale * z_scale
def ne_from_tau_kolmogorov(tau_scatt, z_FRB, zL, nu_obs, L=50*units.kpc, L0=1*units.kpc, alpha=1.,
cosmo=None, debug=False):
"""
Estimate n_e based on observed temporal broadening
Scaled from Equation 1 of Prochaska et al. 2019
Args:
tau_scatt (Quantity):
Observed width of the pulse
z_FRB (float):
Redshift of the FRB
zL (float):
Redshift of the intervening lens
nu_obs (Quantity):
Observed freqency
L (Quantity, optional):
Size of the intervening gas
L0 (Quantity, optional):
Turbulence length scale
alpha (float, optional):
Filling factor and fudge factor term
cosmo (Cosmology, optional):
Returns:
Quantity: <n_e>
"""
if cosmo is None:
cosmo = Planck15
n_e_unscaled = 2e-3 * alpha**(-1) * (L/(50*units.kpc))**(-1/2) * (L0/(1*units.kpc))**(1/3) * (
tau_scatt/(40*1e-6*units.s))**(5/12)
# FRB 181112
D_S_181112 = cosmo.angular_diameter_distance(0.47550)
D_L_181112 = cosmo.angular_diameter_distance(0.36738)
D_LS_181112 = cosmo.angular_diameter_distance_z1z2(0.36738, 0.47550)
D_term = (D_L_181112*D_LS_181112/D_S_181112)**(-5/12)
zterm = (1+0.36738)**(17/12)
# Now scale
D_S = cosmo.angular_diameter_distance(z_FRB)
D_L = cosmo.angular_diameter_distance(zL)
D_LS = cosmo.angular_diameter_distance_z1z2(zL, z_FRB)
cosmo_scale = (D_L*D_LS/D_S)**(-5/12) / D_term
if debug:
print("D_S", D_S.to('Gpc'))
print("D_L", D_L.to('Gpc'))
print("D_LS", D_LS.to('Gpc'))
embed(header='211 of turb_scattering')
# Redshift
z_scale = (1+zL)**(17/12) / zterm
# Frequency
nu_181112 = 1.3 * units.GHz
nu_scale = (nu_obs / nu_181112)**(22/12)
# Scale
n_e = n_e_unscaled * z_scale * cosmo_scale * nu_scale
return n_e / units.cm**3
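# Example (illustrative only; all numbers are placeholders): inverting an
# observed scattering time of 40 microseconds at 1.3 GHz for a lens at
# zL=0.37 in front of an FRB at z_FRB=0.48,
#   ne_from_tau_kolmogorov(40e-6 * units.s, 0.48, 0.37, 1.3 * units.GHz)
# gives the implied <n_e> as a Quantity in cm**-3 under the default L, L0
# and alpha.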
class Turbulence(object):
""" Class for turbulence calculations in a plasma
Primarily used for scattering calculations
"""
def __init__(self, ne, l0, L0, zL, beta=11./3, SM=None, verbose=True, **kwargs):
"""
Parameters
----------
ne : Quantity
Electron density
l0 : Quantity
Inner scale
L0 : Quantity
Outer scale
SM : Quantity, optional
Generally calculated but can be input
zL : float
Redshift of scattering medium
beta : float, optional
Exponent of turbulence. Default is for Kolmogorov
**kwargs :
Passed to init methods
e.g. sets SM if DL is provided
"""
# Init
self.beta = beta
self.zL = zL
self.ne = ne
self.l0 = l0.to('pc')
self.L0 = L0.to('kpc')
self.verbose = verbose
# Set SM?
self.SM = None
if SM is not None:
self.SM = SM
else:
if 'DL' in kwargs.keys():
self.set_SM_obj(kwargs['DL'])
# Might check for units here (SM and beta)
# Set rdiff based on its 'regime'
self.regime = 0 # Undefined
if self.SM is not None:
if 'lobs' in kwargs.keys():
self.set_rdiff(kwargs['lobs'])
@property
def CN2_gal(self):
""" Amplitude of the turbulence per unit length
Equation 29 from Macquarty & Koay 2013
Assumes Kolmogorov
Returns
-------
CN2 : Quantity
"""
# Simple expression
CN2 = 1.8e-3 * (self.ne/(1e-2*units.cm**(-3)))**2 * (self.L0/(0.001*units.pc))**(-2/3)
return (CN2 * units.m**(-20/3.)).si
@property
def SMeff(self):
""" Effective SM
Returns
-------
SMeff : Quantity
"""
if self.SM is None:
return None
else:
return self.SM / (1+self.zL)**2
def set_SM_obj(self, DL):
""" Specify SM for a discrete object (e.g. galaxy)
Equation 31 from Macquart & Koay 2013
Assumes Kolmogorov
Parameters
----------
DL : Quantity
Thickness of the object
Returns
-------
"""
self.SM = (self.CN2_gal * DL).decompose()
if self.verbose:
print("Set SM={}".format(self.SM.decompose()))
def set_cloudlet_rdiff(self, lobs, fa):
"""
Taken from JP notes
Args:
lobs:
fa (int): Number of clouds intersected
Returns:
"""
# ASSUMING rdiff > l0 for now
self.rdiff = self.L0**(-1/5) * (
2*const_re**2 * (lobs/(1+self.zL))**2 * self.ne**2 * fa)**(-3/5)
def set_rdiff(self, lobs):
""" Calculate rdiff in the two regimes and adopt the right one
Requires that SM was set first
Parameters
----------
lobs : Quantity
Observed wavelength
Returns
-------
Nothing; sets self.rdiff and self.regime
"""
# Check
if self.SM is None:
raise IOError("Need to set SM first!")
# Useful expression
C = (np.pi*const_re**2 * lobs**2)/(1+self.zL)**2
# Is rdiff < l0?
r1 = 1. / np.sqrt(C * self.SM * (self.l0**(self.beta-4.) * (self.beta/4.) *
gamma(-self.beta/2.)))
# Is rdiff >> l0?
r2 = np.power(2**(2-self.beta) * C * self.beta * self.SM * gamma(-self.beta/2.) /
gamma(self.beta/2.), 1./(2-self.beta))
# Query
if r1 < self.l0:
if self.verbose:
print('In the regime rdiff < l_0')
self.rdiff = r1.to('m')
self.regime = 1 # rdiff < l0
elif r2 > 10*self.l0:
if self.verbose:
print('In the regime rdiff >> l_0')
self.rdiff = r2.to('m')
self.regime = 2 # rdiff >> l0
else: # Undefined
if self.verbose:
print('In the regime rdiff >~ l_0. Be careful here!')
self.rdiff = r2.to('m')
self.regime = 2 # rdiff >> l0
def angular_broadening(self, lobs, zsource, cosmo=None):
""" Broadening of a point source due to turbulent scattering
Parameters
----------
lobs : Quantity
Observed wavelength
zsource : float
Redshift of radio source
Returns
-------
theta : Quantity
Angular broadening. Radius (half-width at half-max)
"""
if self.regime == 0:
raise ValueError("Need to set rdiff and the regime first!")
if cosmo is None:
from astropy.cosmology import Planck15 as cosmo
# f
if (self.regime == 1) or np.isclose(self.beta,4.):
f = 1.18
elif (self.regime == 2) and np.isclose(self.beta, 11/3.):
f = 1.01
# Distances
D_S = cosmo.angular_diameter_distance(zsource)
D_LS = cosmo.angular_diameter_distance_z1z2(self.zL, zsource)
if self.verbose:
print("D_LS={}, D_S={}".format(D_LS, D_S))
D_LS_D_S = D_LS/D_S
# Evaluate
k = 2*np.pi / (lobs / (1+self.zL)) # Are we sure about this (1+z) factor?!
self.theta = f * D_LS_D_S / (k * self.rdiff) * units.radian
return self.theta.to('arcsec')
def temporal_smearing(self, lobs, zsource, cosmo=None):
""" Temporal smearing due to turbulent scattering
Parameters
----------
lobs : Quantity
Observed wavelength
zsource : float
cosmo : astropy.cosmology, optional
Returns
-------
tau : Quantity
temporal broadening
"""
# Cosmology
if cosmo is None:
cosmo = Planck15
D_S = cosmo.angular_diameter_distance(zsource)
D_L = cosmo.angular_diameter_distance(self.zL)
D_LS = cosmo.angular_diameter_distance_z1z2(self.zL, zsource)
# Angular
theta = self.angular_broadening(lobs, zsource, cosmo=cosmo)
# Calculate
tau = D_L*D_S*(theta.to('radian').value)**2 / D_LS / constants.c / (1+self.zL)
# Return
return tau.to('ms')
def __repr__(self):
txt = '<{:s}'.format(self.__class__.__name__)
#
txt = txt + ' ne={},'.format(self.ne.to('cm**-3'))
txt = txt + ' l0={:.3E},'.format(self.l0.to('pc'))
txt = txt + ' L0={},'.format(self.L0.to('pc'))
txt = txt + ' beta={},'.format(self.beta)
txt = txt + ' zL={}'.format(self.zL)
#txt = txt + ' SMeff={}'.format(self.SMeff)
txt = txt + '>'
return (txt)
|
FRBs/DM
|
frb/turb_scattering.py
|
Python
|
bsd-3-clause
| 13,651
|
[
"Galaxy"
] |
a414640eeb8ffc62561f6b14330246bd611cbb308a5d169a2819cc43b76fdac8
|
# -*- coding: utf-8 -*-
"""
solace.tests.link_check
~~~~~~~~~~~~~~~~~~~~~~~
A test that finds 404 links in the default templates.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import unittest
from urlparse import urljoin
from solace.tests import SolaceTestCase, html_xpath
from solace import models, settings
from solace.database import session
BASE_URL = 'http://localhost/'
MIN_VISITED = 12
class LinkCheckTestCase(SolaceTestCase):
def test_only_valid_links(self):
"""Make sure that all links are valid"""
settings.LANGUAGE_SECTIONS = ['en']
user = models.User('user1', '[email protected]', 'default')
user.is_admin = True
banned_user = models.User('user2', '[email protected]', 'default')
banned_user.is_banned = True
topic = models.Topic('en', 'This is a test topic', 'Foobar', user)
post1 = models.Post(topic, user, 'meh1')
post2 = models.Post(topic, user, 'meh2')
topic.accept_answer(post1)
session.commit()
visited_links = set()
def visit(url):
url = urljoin(BASE_URL, url).split('#', 1)[0]
if not url.startswith(BASE_URL) or url in visited_links:
return
visited_links.add(url)
path = '/' + url.split('/', 3)[-1]
if path.startswith('/logout?'):
return
response = self.client.get(path, follow_redirects=True)
self.assertEqual(response.status_code, 200)
for link in html_xpath(response.html, '//html:a[@href]'):
visit(link.attrib['href'])
# logged out
visit('/')
self.assert_(len(visited_links) > MIN_VISITED)
# logged in
visited_links.clear()
self.login('user1', 'default')
visit('/')
self.assert_(len(visited_links) > MIN_VISITED)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LinkCheckTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
mitsuhiko/solace
|
solace/tests/link_check.py
|
Python
|
bsd-3-clause
| 2,159
|
[
"VisIt"
] |
0788ee5ee8d0dc8528b2db10b8c41d805ebfb2f44e096e0ad1677b90ac7ded61
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2012, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ===========================================================================
"""
This is the main Javascript page.
"""
import copy
import os
import json
import sys
import logging
import traceback
import shutil
import tempfile
import base64
from exe.engine.version import release, revision
from twisted.internet import threads, reactor, defer
from exe.webui.livepage import RenderableLivePage,\
otherSessionPackageClients, allSessionClients, allSessionPackageClients
from nevow import loaders, inevow, tags
from nevow.livepage import handler, IClientHandle, js
from exe.jsui.idevicepane import IdevicePane
from exe.jsui.outlinepane import OutlinePane
from exe.jsui.recentmenu import RecentMenu
from exe.jsui.stylemenu import StyleMenu
from exe.jsui.propertiespage import PropertiesPage
from exe.jsui.templatemenu import TemplateMenu
from exe.webui.authoringpage import AuthoringPage
from exe.webui.stylemanagerpage import StyleManagerPage
from exe.webui.renderable import File
from exe.export.websiteexport import WebsiteExport
from exe.export.textexport import TextExport
from exe.export.singlepageexport import SinglePageExport
from exe.export.scormexport import ScormExport
from exe.export.imsexport import IMSExport
from exe.export.xliffexport import XliffExport
from exe.importers.xliffimport import XliffImport
from exe.importers.scanresources import Resources
from exe.engine.path import Path, toUnicode, TempDirPath
from exe.engine.package import Package
from exe.engine.template import Template
from exe import globals as G
from tempfile import mkdtemp, mkstemp
from exe.engine.mimetex import compile
from urllib import unquote, urlretrieve
from exe.engine.locationbuttons import LocationButtons
from exe.export.epub3export import Epub3Export
from exe.export.xmlexport import XMLExport
from requests_oauthlib import OAuth2Session
from exe.webui.oauthpage import ProcomunOauth
from suds.client import Client
from exe.export.pages import forbiddenPageNames
from exe.engine.lom import lomsubs
from exe.engine.lom.lomclassification import Classification
import zipfile
log = logging.getLogger(__name__)
PROCOMUN_WSDL = ProcomunOauth.BASE_URL + '/oauth_services?wsdl'
class MainPage(RenderableLivePage):
"""
This is the main Javascript page. Responsible for handling URLs.
"""
_templateFileName = 'mainpage.html'
name = 'to_be_defined'
def __init__(self, parent, package, session, config):
"""
Initialize a new Javascript page
'package' is the package that we look after
"""
self.name = package.name
self.session = session
RenderableLivePage.__init__(self, parent, package, config)
self.putChild("resources", File(package.resourceDir))
# styles directory
# self.putChild("stylecss", File(self.config.stylesDir)
mainjs = Path(self.config.jsDir).joinpath('templates', 'mainpage.html')
self.docFactory = loaders.htmlfile(mainjs)
# Create all the children on the left
self.outlinePane = OutlinePane(self)
self.idevicePane = IdevicePane(self)
self.styleMenu = StyleMenu(self)
self.recentMenu = RecentMenu(self)
self.templateMenu = TemplateMenu(self)
# And in the main section
self.propertiesPage = PropertiesPage(self)
self.authoringPage = None
self.previewDir = None
self.authoringPages = {}
self.classificationSources = {}
G.application.resourceDir = Path(package.resourceDir)
self.location_buttons = LocationButtons()
# Save package temporarily
self.tempPackage = None
def renderHTTP(self, ctx):
"""
Called when rendering the MainPage.
"""
# If we are reloading a template, try to translate it in
# case its language has changed
if self.package.isTemplate and not self.package.isChanged:
# We have to reload the template in case it has been already translated before
template = Package.load(self.config.templatesDir / self.package.get_templateFile() + '.elt', isTemplate=True)
template.set_lang(self.package.lang)
# Copy level names and iDevices
self.package._levelNames = copy.copy(template._levelNames)
self.package.idevices = copy.copy(template.idevices)
# TODO: This should be done properly
self.package.description = copy.copy(template.description)
self.package.title = copy.copy(template.title)
self.package.footer = copy.copy(template.footer)
self.package.objectives = copy.copy(template.objectives)
self.package.preknowledge = copy.copy(template.preknowledge)
self.package.author = copy.copy(template.author)
# Copy the nodes and update the root and current ones
# Be careful not to use copy.copy when assigning root and currentNode as this will create entirely new nodes
self.package._nodeIdDict = copy.copy(template._nodeIdDict)
rootkey = [k for k,v in self.package._nodeIdDict.items() if not v.parent][0]
self.package.root = self.package._nodeIdDict[rootkey]
self.package.currentNode = self.package._nodeIdDict[rootkey]
# Delete the template as we don't need it in memory anymore
del template
# We have to go through all nodes to add the correct reference
# to the current package
for node in self.package._nodeIdDict.itervalues():
node._package = self.package
self.package.translatePackage()
self.package.isChanged = False
# Call parent's renderHTTP method
return super(MainPage, self).renderHTTP(ctx)
def child_authoring(self, ctx):
"""
Returns the authoring page that corresponds to
the url http://127.0.0.1:port/package_name/authoring
"""
request = inevow.IRequest(ctx)
if 'clientHandleId' in request.args:
clientid = request.args['clientHandleId'][0]
if clientid not in self.authoringPages:
self.authoringPages[clientid] = AuthoringPage(self)
self.children.pop('authoring')
return self.authoringPages[clientid]
else:
raise Exception('No clientHandleId in request')
def child_preview(self, ctx):
if not self.package.previewDir:
stylesDir = self.config.stylesDir / self.package.style
self.package.previewDir = TempDirPath()
self.exportWebSite(None, self.package.previewDir, stylesDir)
self.previewPage = File(self.package.previewDir / self.package.name)
return self.previewPage
def child_taxon(self, ctx):
"""
Return classification data for the given 'source' and 'identifier'
request arguments as a JSON response.
"""
request = inevow.IRequest(ctx)
data = []
if 'source' in request.args:
if 'identifier' in request.args:
source = request.args['source'][0]
if source:
if source not in self.classificationSources:
self.classificationSources[source] = Classification()
try:
self.classificationSources[source].setSource(source, self.config.configDir)
except:
pass
identifier = request.args['identifier'][0]
if identifier == 'false':
identifier = False
if source.startswith("etb-lre_mec-ccaa"):
stype = 2
else:
stype = 1
try:
data = self.classificationSources[source].getDataByIdentifier(identifier, stype=stype)
except:
pass
return json.dumps({'success': True, 'data': data})
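# Illustrative request (an assumption, not from the original source): the taxon
# lookup above is presumably reached as
#   GET /<package_name>/taxon?source=<source_id>&identifier=<taxon_id>
# and always answers with a JSON body of the form
#   {"success": true, "data": [...]}
# where "data" stays empty if the source or identifier cannot be resolved.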
def goingLive(self, ctx, client):
"""Called each time the page is served/refreshed"""
# inevow.IRequest(ctx).setHeader('content-type', 'application/vnd.mozilla.xul+xml')
# Set up named server side funcs that js can call
def setUpHandler(func, name, *args, **kwargs):
"""
Convience function link funcs to hander ids
and store them
"""
kwargs['identifier'] = name
hndlr = handler(func, *args, **kwargs)
hndlr(ctx, client) # Stores it
setUpHandler(self.handleSaveEXeUIversion,'saveEXeUIversion')
setUpHandler(self.handleIsExeUIAdvanced,'eXeUIVersionCheck')
setUpHandler(self.handleIsPackageDirty, 'isPackageDirty')
setUpHandler(self.handleIsPackageTemplate, 'isPackageTemplate')
setUpHandler(self.handlePackageFileName, 'getPackageFileName')
setUpHandler(self.handleSavePackage, 'savePackage')
setUpHandler(self.handleLoadPackage, 'loadPackage')
setUpHandler(self.recentMenu.handleLoadRecent, 'loadRecent')
# Task 1080, jrf
# setUpHandler(self.handleLoadTutorial, 'loadTutorial')
setUpHandler(self.recentMenu.handleClearRecent, 'clearRecent')
setUpHandler(self.handleImport, 'importPackage')
setUpHandler(self.handleCancelImport, 'cancelImportPackage')
setUpHandler(self.handleExport, 'exportPackage')
setUpHandler(self.handleExportProcomun, 'exportProcomun')
setUpHandler(self.handleXliffExport, 'exportXliffPackage')
setUpHandler(self.handleQuit, 'quit')
setUpHandler(self.handleBrowseURL, 'browseURL')
setUpHandler(self.handleMergeXliffPackage, 'mergeXliffPackage')
setUpHandler(self.handleInsertPackage, 'insertPackage')
setUpHandler(self.handleExtractPackage, 'extractPackage')
setUpHandler(self.outlinePane.handleSetTreeSelection, 'setTreeSelection')
setUpHandler(self.handleClearAndMakeTempPrintDir, 'makeTempPrintDir')
setUpHandler(self.handleRemoveTempDir, 'removeTempDir')
setUpHandler(self.handleTinyMCEimageChoice, 'previewTinyMCEimage')
setUpHandler(self.handleTinyMCEimageDragDrop, 'previewTinyMCEimageDragDrop')
setUpHandler(self.handleTinyMCEmath, 'generateTinyMCEmath')
setUpHandler(self.handleTinyMCEmathML, 'generateTinyMCEmathML')
setUpHandler(self.handleTestPrintMsg, 'testPrintMessage')
setUpHandler(self.handleReload, 'reload')
setUpHandler(self.handleSourcesDownload, 'sourcesDownload')
setUpHandler(self.handleUploadFileToResources, 'uploadFileToResources')
# For the new ExtJS 4.0 interface
setUpHandler(self.outlinePane.handleAddChild, 'AddChild')
setUpHandler(self.outlinePane.handleDelNode, 'DelNode')
setUpHandler(self.outlinePane.handleRenNode, 'RenNode')
setUpHandler(self.outlinePane.handlePromote, 'PromoteNode')
setUpHandler(self.outlinePane.handleDemote, 'DemoteNode')
setUpHandler(self.outlinePane.handleUp, 'UpNode')
setUpHandler(self.outlinePane.handleDown, 'DownNode')
setUpHandler(self.handleCreateDir, 'CreateDir')
setUpHandler(self.handleOverwriteLocalStyle, 'overwriteLocalStyle')
setUpHandler(self.handleSaveTemplate, 'saveTemplate')
setUpHandler(self.handleLoadTemplate, 'loadTemplate')
setUpHandler(self.handleMetadataWarning, 'showMetadataWarning')
setUpHandler(self.hideMetadataWarningForever, 'hideMetadataWarningForever')
setUpHandler(self.handlePackagePropertiesValidation, 'validatePackageProperties')
self.idevicePane.client = client
self.styleMenu.client = client
self.templateMenu.client = client
self.webServer.stylemanager.client = client
self.webServer.templatemanager.client = client
if not self.webServer.monitoring:
self.webServer.monitoring = True
self.webServer.monitor()
def render_config(self, ctx, data):
config = {
'lastDir': G.application.config.lastDir,
'locationButtons': self.location_buttons.buttons,
'lang': G.application.config.locale.split('_')[0],
'showPreferences': G.application.config.showPreferencesOnStart == '1' and not G.application.preferencesShowed,
'showNewVersionWarning': G.application.config.showNewVersionWarningOnStart == '1' and not G.application.newVersionWarningShowed,
'release' : release,
'loadErrors': G.application.loadErrors,
'showIdevicesGrouped': G.application.config.showIdevicesGrouped == '1',
'authoringIFrameSrc': '%s/authoring?clientHandleId=%s' % (self.package.name, IClientHandle(ctx).handleId),
'pathSep': os.path.sep,
'autosaveTime': float(G.application.config.autosaveTime),
'snap': G.application.snap
}
# When working with chinese, we need to add the full language string
# TODO: We should test if we really need to split the locale
if G.application.config.locale.split('_')[0] == 'zh':
config['lang'] = G.application.config.locale
G.application.preferencesShowed = True
G.application.newVersionWarningShowed = True
G.application.loadErrors = []
return tags.script(type="text/javascript")["var config = %s" % json.dumps(config)]
def render_jsuilang(self, ctx, data):
return ctx.tag(src="../jsui/i18n/" + unicode(G.application.config.locale) + ".js")
def render_extjslang(self, ctx, data):
return ctx.tag(src="../jsui/extjs/locale/ext-lang-" + unicode(G.application.config.locale) + ".js")
def render_htmllang(self, ctx, data):
lang = G.application.config.locale.replace('_', '-').split('@')[0]
attribs = {'lang': unicode(lang), 'xml:lang': unicode(lang), 'xmlns': 'http://www.w3.org/1999/xhtml'}
return ctx.tag(**attribs)
def render_version(self, ctx, data):
revstring = ''
if G.application.snap:
revstring = ' (SNAP)'
elif G.application.standalone:
revstring = ' (standalone)'
elif G.application.portable:
revstring = ' (portable)'
return [tags.p()["Version: %s%s" % (release, revstring)]]
def handleTestPrintMsg(self, client, message):
"""
Prints a test message, and yup, that's all!
"""
print "Test Message: ", message, " [eol, eh!]"
def handleIsPackageDirty(self, client, ifClean, ifDirty):
"""
Called by js to know if the package is dirty or not.
ifClean is JavaScript to be eval'ed on the client if the package has
not been changed
ifDirty is JavaScript to be eval'ed on the client if the package has
been changed
"""
if self.package.isChanged:
client.sendScript(ifDirty)
else:
client.sendScript(ifClean)
def handleIsPackageTemplate(self, client, ifTemplate, ifNotTemplate):
"""
Called by js to know if the package is a template or not.
It also checks if the package has already been modified.
"""
if self.package.isTemplate and not self.package.isChanged:
client.sendScript(ifTemplate)
else:
client.sendScript(ifNotTemplate)
def handlePackageFileName(self, client, onDone, onDoneParam,export_type_name):
"""
Calls the javascript func named by 'onDone' passing as the
only parameter the filename of our package. If the package
has never been saved or loaded, it passes an empty string
'onDoneParam' will be passed to onDone as a param after the
filename
"""
client.call(onDone, unicode(self.package.filename), onDoneParam,export_type_name)
def b4save(self, client, inputFilename, ext, msg):
"""
Call this before saving a file to get the right filename.
Returns the filename with the right extension, or raises when it would overwrite an existing file
'inputFilename' is the filename given by the user
'ext' is the extension that the filename should have
'msg' will be shown if the filename already exists
"""
if not inputFilename.lower().endswith(ext):
inputFilename += ext
# If after adding the extension there is a file
# with the same name, fail and show an error
if Path(inputFilename).exists():
explanation = _(u'"%s" already exists.\nPlease try again with a different filename') % inputFilename
msg = u'%s\n%s' % (msg, explanation)
client.alert(msg)
raise Exception(msg)
# When saving a template, we don't check for the filename
# before this state, so we have to check for duplicates
# here
if ext.lower() == '.elt' and Path(inputFilename).exists():
explanation = _(u'"%s" already exists.\nPlease try again with a different filename') % inputFilename
msg = u'%s\n%s' % (msg, explanation)
client.alert(msg)
raise Exception(msg)
return inputFilename
def handleSavePackage(self, client, filename=None, onDone=None,export_type_name=None):
"""
Save the current package
'filename' is the filename to save the package to
'onDone' will be evaled after saving instead or redirecting
to the new location (in cases of package name changes).
(This is used where the user goes file|open when their
package is changed and needs saving)
"""
try:
filename = Path(filename, 'utf-8')
except:
filename = None
# If the script is not passing a filename to us,
# Then use the last filename that the package was loaded from/saved to
if not filename:
filename = self.package.filename
assert filename, 'Somehow save was called without a filename on a package that has no default filename.'
saveDir = filename.dirname()
if saveDir and not saveDir.isdir():
client.alert(_(u'Cannot access directory named ') + unicode(saveDir) + _(u'. Please use ASCII names.'))
return
oldName = self.package.name
extension = filename.splitext()[1]
if extension == '.elt':
return self.handleSaveTemplate(client, filename.basename(), onDone, edit=True)
# Add the extension if its not already there and give message if not saved
filename = self.b4save(client, filename, '.elp', _(u'SAVE FAILED!'))
name = str(filename.basename().splitext()[0])
if name.upper() in forbiddenPageNames:
client.alert(_('SAVE FAILED!\n"%s" is not a valid name for a package') % str(name))
return
try:
self.package.save(filename) # This can change the package name
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
raise
# Take into account that some names are not allowed, so we have to take care of that before reloading
if G.application.webServer is not None and self.package.name in G.application.webServer.invalidPackageName:
self.package._name = self.package._name + '_1'
if not export_type_name:
# Tell the user and continue
if onDone:
client.alert(_(u'Package saved to: %s') % filename, onDone)
elif self.package.name != oldName:
# Redirect the client if the package name has changed
self.webServer.root.putChild(self.package.name, self)
log.info('Package saved, redirecting client to /%s' % self.package.name)
client.alert(_(u'Package saved to: %s') % filename, 'eXe.app.gotoUrl("/%s")' % self.package.name.encode('utf8'), \
filter_func=otherSessionPackageClients)
else:
# client.alert(_(u'Package saved to: %s') % filename, filter_func=otherSessionPackageClients)
# A nice notification instead of an alert
filename = _('Package saved to: %s') % filename.replace("\\","\\/")
client.sendScript(u'eXe.app.notifications.savedPackage("%s")' % filename)
def handleSaveTemplate(self, client, templatename=None, onDone=None, edit=False):
'''Save template'''
if not templatename.endswith(".elt"):
filename = Path(self.config.templatesDir/templatename +'.elt', 'utf-8')
else:
filename = Path(self.config.templatesDir/templatename, 'utf-8')
templatename = str(filename.basename().splitext()[0])
if edit == False:
filename = self.b4save(client, filename, '.elt', _(u'SAVE FAILED!'))
name = str(filename.basename().splitext()[0])
if name.upper() in forbiddenPageNames:
client.alert(_('SAVE FAILED!\n"%s" is not a valid name for a template') % str(templatename))
return
try:
configxmlData = '<?xml version="1.0"?>\n'
configxmlData += '<template>\n'
configxmlData += '<name>'+templatename+'</name>\n'
configxmlData += '</template>'
# Make the root node the current one
self.package.currentNode = self.package.root
# Save the template
self.package.save(filename, isTemplate=True, configxml=configxmlData)
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
raise
template = Template(filename)
self.config.templateStore.addTemplate(template)
client.alert(_(u'Template saved: %s') % templatename, onDone)
def handleLoadPackage(self, client, filename, filter_func=None):
"""Load the package named 'filename'"""
package = self._loadPackage(client, filename, newLoad=True)
self.session.packageStore.addPackage(package)
self.webServer.root.bindNewPackage(package, self.session)
if package.load_message:
client.alert(package.load_message,
onDone=(u'eXe.app.gotoUrl("/%s")' % package.name).encode('utf8'),
filter_func=filter_func)
else:
client.sendScript((u'eXe.app.gotoUrl("/%s")' % package.name).encode('utf8'), filter_func=filter_func)
def handleLoadTemplate(self, client, filename):
"""Load the template named 'filename'"""
# By transforming it into a Path, we ensure that it is using the correct directory separator
template = self._loadPackage(client, Path(filename), newLoad=True, isTemplate=True)
self.webServer.root.bindNewPackage(template, self.session)
client.sendScript((u'eXe.app.gotoUrl("/%s")' % template.name).encode('utf8'), filter_func=allSessionPackageClients)
def handleMetadataWarning(self, client, export_type):
"""
Checks if the package metadata has been changed and shows
a warning to the user.
"""
if self.config.metadataWarning == "1" and self.package.has_custom_metadata():
client.call(u'eXe.app.getController("Toolbar").showMetadataWarning', export_type, '')
else:
client.call(u'eXe.app.getController("Toolbar").processExportEventValidationStep', export_type, '')
def hideMetadataWarningForever(self, client):
"""
Updates the user configuration to hide the metadata warning when exporting
for the current user.
"""
self.config.metadataWarning = "0"
def handlePackagePropertiesValidation(self, client, export_type):
invalid_properties = self.package.valid_properties(export_type)
if len(invalid_properties) == 0:
client.call(u'eXe.app.getController("Toolbar").exportPackage', export_type, '')
else:
invalid_properties_str = u''
for prop in invalid_properties:
invalid_properties_str += prop.get('name') + '|' + prop.get('reason')
if 'allowed_values' in prop:
invalid_properties_str += '|' + prop.get('allowed_values')
invalid_properties_str += ','
invalid_properties_str = invalid_properties_str[:-1]
# Get file system encoding
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
# Turn the package filename into unicode before passing it to the javascript function
client.call(u'eXe.app.getController("Toolbar").packagePropertiesCompletion', export_type, unicode(str(self.package.filename), encoding), invalid_properties_str)
# No longer used - Task 1080, jrf
# def handleLoadTutorial(self, client):
# """
# Loads the tutorial file, from the Help menu
# """
# filename = self.config.webDir.joinpath("docs")\
# .joinpath("eXe-tutorial.elp")
# self.handleLoadPackage(client, filename)
def progressDownload(self, numblocks, blocksize, filesize, client):
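# Progress callback in urlretrieve's reporthook style: numblocks and blocksize
# describe the data received so far, so the percentage is computed from them,
# clamped to the 0-100 range, and pushed to the client's Ext progress dialog.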
try:
percent = min((numblocks * blocksize * 100) / filesize, 100)
if percent < 0:
percent = 0
except:
percent = 100
client.sendScript('Ext.MessageBox.updateProgress(%f, "%d%%", "%s")' % (float(percent) / 100, percent, _("Downloading...")))
log.info('%3d' % (percent))
def isConnected(self, hostname):
try:
if sys.platform=='darwin' and hasattr(sys, 'frozen'):
verify = 'cacerts.txt'
urlretrieve(hostname,context=ssl.create_default_context(cafile=verify))
else:
urlretrieve(hostname)
log.debug('eXe can reach host %s without problems'%(hostname))
return True
except Exception, e:
log.error('Error checking host %s is %s'%(hostname, e.strerror))
return False
def handleSourcesDownload(self, client):
"""
Download taxon sources from url and deploy in $HOME/.exe/classification_sources
"""
if not self.isConnected("https://github.com/"):
client.sendScript('Ext.MessageBox.alert("%s", "%s" )' % (_("Sources Download"), _("Could not retrieve data (Core error)")))
return None
url = 'https://github.com/exelearning/classification_sources/raw/master/classification_sources.zip'
client.sendScript('Ext.MessageBox.progress("%s", "%s")' %(_("Sources Download"), _("Connecting to classification sources repository...")))
def successDownload(result):
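# Callback for the deferred urlretrieve call: result is the (filename, headers)
# tuple returned by urlretrieve, so the downloaded zip path is its first element.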
filename = result[0]
log.debug("successDownload filename: %s"%(filename))
if not zipfile.is_zipfile(filename):
log.error("filename not is zip file: %s"%(filename))
log.error("Filename exists: %s"%(os.path.exists(filename)))
client.sendScript('Ext.MessageBox.alert("%s", "%s" )' % (_("Sources Download"), _("There has been an error while trying to download classification sources. Please try again later.")))
return None
zipFile = zipfile.ZipFile(filename, "r")
try:
zipFile.extractall(G.application.config.configDir)
log.debug("Extracted in %s"%(G.application.config.configDir))
client.sendScript('Ext.MessageBox.hide()')
except Exception, e:
log.error('Error extracting file %s in %s is: %s'%(filename, G.application.config.configDir, e.strerror))
finally:
Path(filename).remove()
log.debug("Deleted %s"%(filename))
if (sys.platform=='darwin' and hasattr(sys, 'frozen')):
cafile = "cacerts.txt"
try:
d = threads.deferToThread(urlretrieve, url, "/tmp/classification_sources.zip", lambda n, b, f: self.progressDownload(n, b, f, client), context=ssl.create_default_context(cafile=cafile))
d.addCallback(successDownload)
except Exception, e:
log.error('Error downloading url %s is %s'%(url, e.strerror))
elif (sys.platform=='darwin'):
d = threads.deferToThread(urlretrieve, url, "/tmp/classification_sources.zip", lambda n, b, f: self.progressDownload(n, b, f, client))
d.addCallback(successDownload)
else:
d = threads.deferToThread(urlretrieve, url, None, lambda n, b, f: self.progressDownload(n, b, f, client))
d.addCallback(successDownload)
def handleOverwriteLocalStyle(self, client, style_dir, downloaded_file):
"""
Delete locally installed style and import new version from URL
"""
stylemanager = StyleManagerPage(self)
stylemanager.client = client
stylemanager.overwriteLocalStyle(style_dir, downloaded_file)
def handleReload(self, client):
self.location_buttons.updateText()
client.sendScript('eXe.app.gotoUrl()', filter_func=allSessionClients)
def handleRemoveTempDir(self, client, tempdir, rm_top_dir):
"""
Removes a temporary directory and any contents therein
(from the bottom up), and yup, that's all!
#
# swiped from an example on:
# http://docs.python.org/lib/os-file-dir.html
################################################################
# Delete everything reachable from the directory named in 'top',
# assuming there are no symbolic links.
# CAUTION: This is dangerous! For example, if top == '/', it
# could delete all your disk files.
"""
top = tempdir
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
##################################################################
# and finally, go ahead and remove the top-level tempdir itself:
if int(rm_top_dir) != 0:
os.rmdir(tempdir)
def get_printdir_relative2web(self, exported_dir):
"""
Related to the following ClearParentTempPrintDirs(): return a
local URL corresponding to the exported_dir
"""
rel_name = exported_dir[len(G.application.tempWebDir):]
if sys.platform[:3] == "win":
rel_name = rel_name.replace('\\', '/')
if rel_name.startswith('/'):
rel_name = rel_name[1:]
http_relative_pathname = '%s/%s'%(G.application.exeAppUri, rel_name)
log.debug('printdir http_relative_pathname=%s'%(http_relative_pathname))
return http_relative_pathname
def ClearParentTempPrintDirs(self, client, log_dir_warnings):
"""
Determine the parent temporary printing directory, and clear them
if safe to do so (i.e., if not the config dir itself, for example)
Makes (if necessary), and clears out (if applicable) the parent
temporary directory.
The calling handleClearAndMakeTempPrintDir() shall then make a
specific print-job subdirectory.
"""
#
# Create the parent temp print dir as hardcoded under the webdir, as:
# http://temp_print_dirs
# (eventually may want to allow this information to be configured by
# the user, stored in globals, etc.)
web_dirname = G.application.tempWebDir
under_dirname = os.path.join(web_dirname, "temp_print_dirs")
clear_tempdir = 0
dir_warnings = ""
# but first need to ensure that under_dirname itself is available;
# if not, create it:
if cmp(under_dirname, "") != 0:
if os.path.exists(under_dirname):
if os.path.isdir(under_dirname):
# Yes, this directory already exists.
# pre-clean it, keeping the clutter down:
clear_tempdir = 1
else:
dir_warnings = "WARNING: The desired Temporary Print " \
+ "Directory, \"" + under_dirname \
+ "\", already exists, but as a file!\n"
if log_dir_warnings:
log.warn("ClearParentTempPrintDirs(): The desired " \
+ "Temporary Print Directory, \"%s\", " \
+ "already exists, but as a file!", \
under_dirname)
under_dirname = web_dirname
# but, we can't just put the tempdirs directly underneath
# the webDir, since no server object exists for it.
# So, as a quick and dirty solution, go ahead and put
# them in the images folder:
under_dirname = os.path.join(under_dirname, "images")
dir_warnings += " RECOMMENDATION: please " \
+ "remove/rename this file to allow eXe easier "\
+ "management of its temporary print files.\n"
dir_warnings += " eXe will create the temporary " \
+ "printing directory directly under \"" \
+ under_dirname + "\" instead, but this might "\
+ "leave some files around after eXe terminates..."
if log_dir_warnings:
log.warn(" RECOMMENDATION: please remove/rename "\
+ "this file to allow eXe easier management of "\
+ "its temporary print files.")
log.warn(" eXe will create the temporary " \
+ "printing directory directly under \"%s\" " \
+ "instead, but this might leave some files " \
+ "around after eXe terminates...", \
under_dirname)
# and note that we do NOT want to clear_tempdir
# on the config dir itself!!!!!
else:
os.makedirs(under_dirname)
# and while we could clear_tempdir on it, there's no need to.
if clear_tempdir:
# before making this particular print job's temporary print
# directory underneath the now-existing temp_print_dirs,
# go ahead and clear out temp_print_dirs such that we have
# AT MOST one old temporary set of print job files still existing
# once eXe terminates:
rm_topdir = "0"
# note: rm_topdir is passed in as a STRING since
# handleRemoveTempDir expects as such from nevow's
# clientToServerEvent() call:
self.handleRemoveTempDir(client, under_dirname, rm_topdir)
return under_dirname, dir_warnings
def handleClearAndMakeTempPrintDir(self, client, suffix, prefix, \
callback):
"""
Makes a temporary printing directory, and yup, that's pretty much it!
"""
# First get the name of the parent temp directory, after making it
# (if necessary) and clearing (if applicable):
log_dir_warnings = 1
(under_dirname, dir_warnings) = self.ClearParentTempPrintDirs( \
client, log_dir_warnings)
# Next, go ahead and create this particular print job's temporary
# directory under the parent temp directory:
temp_dir = mkdtemp(suffix, prefix, under_dirname)
# Finally, pass the created temp_dir back to the expecting callback:
client.call(callback, temp_dir, dir_warnings)
def handleTinyMCEimageChoice(self, client, tinyMCEwin, tinyMCEwin_name, \
tinyMCEfield, local_filename, preview_filename):
"""
Once an image is selected in the file browser that is spawned by the
TinyMCE image dialog, copy this file (which is local to the user's
machine) into the server space, under a preview directory
(after checking if this exists, and creating it if necessary).
Note that this IS a "cheat", in violation of the client-server
separation, but can be done since we know that the eXe server is
actually sitting on the client host.
"""
server_filename = ""
errors = 0
log.debug('handleTinyMCEimageChoice: image local = ' + local_filename
+ ', base=' + os.path.basename(local_filename))
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("image previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert( \
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview tinyMCE-chosen image: " +
"Preview dir %s is a file, cannot replace it" \
% previewDir)
errors += 1
if errors == 0:
log.debug('handleTinyMCEimageChoice: originally, local_filename='
+ local_filename)
local_filename = unicode(local_filename, 'utf-8')
log.debug('handleTinyMCEimageChoice: in unicode, local_filename='
+ local_filename)
localImagePath = Path(local_filename)
log.debug('handleTinyMCEimageChoice: after Path, localImagePath= '
+ localImagePath)
if not localImagePath.exists() or not localImagePath.isfile():
client.alert( \
_(u'Local file %s is not found, cannot preview it') \
% localImagePath)
log.error("Couldn't find tinyMCE-chosen image: %s" \
% localImagePath)
errors += 1
try:
# joinpath needs its join arguments to already be in Unicode:
#preview_filename = toUnicode(preview_filename);
# but that's okay, cuz preview_filename is now URI safe, right?
log.debug('URIencoded preview filename=' + preview_filename)
server_filename = previewDir.joinpath(preview_filename)
log.debug("handleTinyMCEimageChoice copying image from \'"\
+ local_filename + "\' to \'" \
+ server_filename.abspath() + "\'.")
shutil.copyfile(local_filename, \
server_filename.abspath())
# new optional description file to provide the
# actual base filename, such that once it is later processed
# copied into the resources directory, it can be done with
# only the basename. Otherwise the resource filenames
# are too long for some users, preventing them from making
# backup CDs of the content, for example.
#
# Remember that the full path of the
# file is only used here as an easy way to keep the names
# unique WITHOUT requiring a roundtrip call from the Javascript
# to this server, and back again, a process which does not
# seem to work with tinyMCE in the mix. BUT, once tinyMCE's
# part is done, and this image processed, it can be returned
# to just its basename, since the resource parts have their
# own unique-ification mechanisms already in place.
descrip_file_path = Path(server_filename + ".exe_info")
log.debug("handleTinyMCEimageChoice creating preview " \
+ "description file \'" \
+ descrip_file_path.abspath() + "\'.")
descrip_file = open(descrip_file_path, 'wb')
# safety measures against TinyMCE, otherwise it will
# later take ampersands and entity-escape them into '&amp;',
# and filenames with hash signs will not be found, etc.:
unspaced_filename = local_filename.replace(' ', '_')
unhashed_filename = unspaced_filename.replace('#', '_num_')
unamped_local_filename = unhashed_filename.replace('&', '_and_')
log.debug("and setting new file basename as: "
+ unamped_local_filename)
my_basename = os.path.basename(unamped_local_filename)
descrip_file.write((u"basename=" + my_basename).encode('utf-8'))
descrip_file.flush()
descrip_file.close()
client.sendScript('eXe.app.fireEvent("previewTinyMCEImageDone")')
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
log.error("handleTinyMCEimageChoice unable to copy local image "
+ "file to server prevew, error = " + str(e))
raise
def handleUploadFileToResources(self, client, local_file, preview_filename):
server_filename = ""
errors = 0
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("files previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert(\
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview file: " +
"Preview dir %s is a file, cannot replace it" \
% previewDir)
errors += 1
# else:
# This will remove the directory content, but we might want to record more than one file before saving
# shutil.rmtree(previewDir)
# previewDir.makedirs()
if errors == 0:
log.debug('originally, local_file='
+ local_file)
log.debug('in unicode, local_file='
+ local_file)
localFilePath = Path(local_file)
log.debug('after Path, localFilePath= '
+ localFilePath)
try:
log.debug('URIencoded preview filename=' + preview_filename)
server_filename = previewDir.joinpath(preview_filename)
server_file = open(server_filename, 'wb')
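# The client sends the file content as a data URI; keep only the part after
# ";base64," and decode it before writing it to the preview file.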
local_file = local_file.split(";base64,",1)
local_file = local_file[1]
server_file.write(base64.b64decode(local_file))
server_file.flush()
server_file.close()
sf = str(server_filename)
# convert webm to mp3
# webm_version = AudioSegment.from_file(sf,"webm")
# webm_version.export(server_filename.replace(".webm",".mp3"), format="mp3")
client.sendScript('eXe.app.fireEvent("uploadFileToResourcesDone")')
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
log.error("Unable to save file "
+ "file to server prevew, error = " + str(e))
raise
def handleTinyMCEimageDragDrop(self, client, tinyMCEwin, tinyMCEwin_name, \
local_filename, preview_filename):
server_filename = ""
errors = 0
log.debug('handleTinyMCEimageDragDrop: image local = ' + local_filename
+ ', base=' + os.path.basename(local_filename))
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("image previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert(\
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview tinyMCE-chosen image: " +
"Preview dir %s is a file, cannot replace it" \
% previewDir)
errors += 1
if errors == 0:
log.debug('handleTinyMCEimageDragDrop: originally, local_filename='
+ local_filename)
log.debug('handleTinyMCEimageDragDrop: in unicode, local_filename='
+ local_filename)
localImagePath = Path(local_filename)
log.debug('handleTinyMCEimageDragDrop: after Path, localImagePath= '
+ localImagePath)
try:
log.debug('URIencoded preview filename=' + preview_filename)
server_filename = previewDir.joinpath(preview_filename)
descrip_file_path = Path(server_filename + ".exe_info")
log.debug("handleTinyMCEimageDragDrop creating preview " \
+ "description file \'" \
+ descrip_file_path.abspath() + "\'.")
descrip_file = open(server_filename, 'wb')
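# The dropped image arrives as a base64-encoded JPEG data URI; strip the prefix
# and write the decoded bytes straight into the preview file.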
local_filename = local_filename.replace('data:image/jpeg;base64,', '')
descrip_file.write(base64.b64decode(local_filename))
descrip_file.flush()
descrip_file.close()
client.sendScript('eXe.app.fireEvent("previewTinyMCEDragDropImageDone","'+preview_filename+'")')
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
log.error("handleTinyMCEimageDragDrop unable to copy local image "
+ "file to server prevew, error = " + str(e))
raise
def handleTinyMCEmath(self, client, tinyMCEwin, tinyMCEwin_name, \
tinyMCEfield, latex_source, math_fontsize, \
preview_image_filename, preview_math_srcfile):
"""
Based off of handleTinyMCEimageChoice(),
handleTinyMCEmath() is similar in that it places a .gif math image
(and a corresponding .tex LaTeX source file) into the previews dir.
Rather than copying the image from a user-selected directory, though,
this routine actually generates the math image using mimetex.
"""
server_filename = ""
errors = 0
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("image previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert( \
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview tinyMCE-chosen image: " +
"Preview dir %s is a file, cannot replace it" \
% previewDir)
errors += 1
#if errors == 0:
# localImagePath = Path(local_filename)
# if not localImagePath.exists() or not localImagePath.isfile():
# client.alert( \
# _(u'Image file %s is not found, cannot preview it') \
# % localImagePath)
# log.error("Couldn't find tinyMCE-chosen image: %s" \
# % localImagePath)
# callback_errors = "Image file %s not found, cannot preview" \
# % localImagePath
# errors += 1
# the mimetex usage code was swiped from the Math iDevice:
if latex_source != "":
# first write the latex_source out into the preview_math_srcfile,
# such that it can then be passed into the compile command:
math_filename = previewDir.joinpath(preview_math_srcfile)
math_filename_str = math_filename.abspath().encode('utf-8')
log.info("handleTinyMCEmath: using LaTeX source: " + latex_source)
log.debug("writing LaTeX source into \'" \
+ math_filename_str + "\'.")
math_file = open(math_filename, 'wb')
# do we need to append a \n here?:
math_file.write(latex_source)
math_file.flush()
math_file.close()
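# Compile the LaTeX source file into a preview image with mimetex; compile()
# returns the path of the temporary image it generated.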
try:
use_latex_sourcefile = math_filename_str
tempFileName = compile(use_latex_sourcefile, math_fontsize, \
latex_is_file=True)
except Exception, e:
client.alert(_('Could not create the image') + " (LaTeX)","$exeAuthoring.errorHandler('handleTinyMCEmath')")
log.error("handleTinyMCEmath unable to compile LaTeX using "
+ "mimetex, error = " + str(e))
raise
# copy the file into previews
server_filename = previewDir.joinpath(preview_image_filename)
log.debug("handleTinyMCEmath copying math image from \'"\
+ tempFileName + "\' to \'" \
+ server_filename.abspath().encode('utf-8') + "\'.")
shutil.copyfile(tempFileName, \
server_filename.abspath().encode('utf-8'))
# Delete the temp file made by compile
Path(tempFileName).remove()
return
def handleTinyMCEmathML(self, client, tinyMCEwin, tinyMCEwin_name, \
tinyMCEfield, mathml_source, math_fontsize, \
preview_image_filename, preview_math_srcfile):
"""
See self.handleTinyMCEmath
To do: This should generate an image from MathML code, not from LaTeX code.
"""
# Provisional (just an alert message)
client.alert(_('Could not create the image') + " (MathML)","$exeAuthoring.errorHandler('handleTinyMCEmathML')")
return
server_filename = ""
errors = 0
webDir = Path(G.application.tempWebDir)
previewDir = webDir.joinpath('previews')
if not previewDir.exists():
log.debug("image previews directory does not yet exist; " \
+ "creating as %s " % previewDir)
previewDir.makedirs()
elif not previewDir.isdir():
client.alert( \
_(u'Preview directory %s is a file, cannot replace it') \
% previewDir)
log.error("Couldn't preview tinyMCE-chosen image: " +
"Preview dir %s is a file, cannot replace it" \
% previewDir)
errors += 1
# the mimetex usage code was swiped from the Math iDevice:
if mathml_source != "":
# first write the mathml_source out into the preview_math_srcfile,
# such that it can then be passed into the compile command:
math_filename = previewDir.joinpath(preview_math_srcfile)
math_filename_str = math_filename.abspath().encode('utf-8')
log.info("handleTinyMCEmath: using LaTeX source: " + mathml_source)
log.debug("writing LaTeX source into \'" \
+ math_filename_str + "\'.")
math_file = open(math_filename, 'wb')
# do we need to append a \n here?:
math_file.write(mathml_source)
math_file.flush()
math_file.close()
try:
use_mathml_sourcefile = math_filename_str
tempFileName = compile(use_mathml_sourcefile, math_fontsize, \
latex_is_file=True)
except Exception, e:
client.alert(_('Could not create the image') + " (MathML)","$exeAuthoring.errorHandler('handleTinyMCEmathML')")
log.error("handleTinyMCEmathML unable to compile MathML using "
+ "mimetex, error = " + str(e))
raise
# copy the file into previews
server_filename = previewDir.joinpath(preview_image_filename)
log.debug("handleTinyMCEmath copying math image from \'"\
+ tempFileName + "\' to \'" \
+ server_filename.abspath().encode('utf-8') + "\'.")
shutil.copyfile(tempFileName, \
server_filename.abspath().encode('utf-8'))
# Delete the temp file made by compile
Path(tempFileName).remove()
return
def getResources(self, dirname, html, client):
Resources.cancel = False
self.importresources = Resources(dirname, self.package.findNode(client.currentNodeId), client)
# import cProfile
# import lsprofcalltree
# p = cProfile.Profile()
# p.runctx( "resources.insertNode()",globals(),locals())
# k = lsprofcalltree.KCacheGrind(p)
# data = open('exeprof.kgrind', 'w+')
# k.output(data)
# data.close()
self.importresources.insertNode([html.partition(dirname + os.sep)[2]])
def handleImport(self, client, importType, path, html=None):
if importType == 'html':
if (not html):
client.call('eXe.app.getController("Toolbar").importHtml2', path)
else:
d = threads.deferToThread(self.getResources, path, html, client)
d.addCallback(self.handleImportCallback, client)
d.addErrback(self.handleImportErrback, client)
client.call('eXe.app.getController("Toolbar").initImportProgressWindow', _(u'Importing HTML...'))
if importType.startswith('lom'):
try:
setattr(self.package, importType, lomsubs.parse(path))
client.call('eXe.app.getController("MainTab").lomImportSuccess', importType)
except Exception, e:
client.alert(_('LOM Metadata import FAILED!\n%s') % str(e))
def handleImportErrback(self, failure, client):
client.alert(_(u'Error importing HTML:\n') + unicode(failure.getBriefTraceback()), \
(u'eXe.app.gotoUrl("/%s")' % self.package.name).encode('utf8'), filter_func=otherSessionPackageClients)
def handleImportCallback(self, resources, client):
client.call('eXe.app.getController("Toolbar").closeImportProgressWindow')
client.sendScript((u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=allSessionPackageClients)
def handleCancelImport(self, client):
log.info('Cancel import')
Resources.cancelImport()
def handleExportProcomun(self, client):
# If the user hasn't done the OAuth authentication yet, start this process
if not client.session.oauthToken.get('procomun'):
verify = True
if hasattr(sys, 'frozen'):
verify = 'cacerts.txt'
oauth2Session = OAuth2Session(ProcomunOauth.CLIENT_ID, redirect_uri=ProcomunOauth.REDIRECT_URI)
oauth2Session.verify = verify
authorization_url, state = oauth2Session.authorization_url(ProcomunOauth.AUTHORIZATION_BASE_URL)
self.webServer.oauth.procomun.saveState(state, oauth2Session, client)
# Call the script to start the Procomún authentication process
client.call('eXe.app.getController("Toolbar").getProcomunAuthToken', authorization_url)
return
def exportScorm():
"""
Exports the package we are about to upload to Procomún to SCORM 1.2.
:returns: Full path to the exported ZIP.
"""
# Update progress for the user
client.call('Ext.MessageBox.updateProgress', 0.3, '30%', _(u'Exporting package as SCORM 1.2...'))
stylesDir = self.config.stylesDir / self.package.style
fd, filename = mkstemp('.zip')
os.close(fd)
scorm = ScormExport(self.config, stylesDir, filename, 'scorm1.2')
scorm.export(self.package)
return filename
def publish(filename):
"""
Upload the exported package to Procomún.
:param filename: Full path to the exported ZIP.
"""
# Update progress for the user
client.call('Ext.MessageBox.updateProgress', 0.7, '70%', _(u'Uploading package to Procomún...'))
# Get OAuth Access Token and add it to the request headers
token = client.session.oauthToken['procomun']
headers = {
'Authorization': 'Bearer %s' % str(token['access_token']),
'Connection': 'close'
}
# Create the WSDL client
procomun = Client(PROCOMUN_WSDL, headers=headers)
# Create and configure the ODE object
ode = procomun.factory.create('xsd:anyType')
ode.file = base64.b64encode(open(filename, 'rb').read())
ode.file_name = self.package.name
# Try to upload the ODE to Procomún
try:
result = procomun.service.odes_soap_create(ode)
except Exception as e:
# If there is an exception, log it and show a generic error message to the user
log.error('An error has occurred while trying to publish a package to Procomún. The error message is: %s', str(e))
client.call('Ext.MessageBox.hide')
client.alert(_(u'Unknown error when trying to upload package to Procomún.'), title=_(u'Publishing document to Procomún'))
return
# Parse the result received from Procomún
parsedResult = {}
for item in result.item:
parsedResult[item.key] = item.value
if str(item.key) == 'data':
parsedResult[item.key] = {}
if item.value:
parsedResult[item.key][item.value.item.key] = item.value.item.value
# Show a message to the user based on the result
client.call('Ext.MessageBox.hide')
if parsedResult['status'] == 'true':
link_url = ProcomunOauth.BASE_URL + '/ode/view/%s' % parsedResult['data']['documentId']
client.alert(
js(
'\''
+ _(u'Package exported to <a href="%s" target="_blank" title="Click to view the exported package">%s</a>.') % (link_url, self.package.title)
+ u'<br />'
+ u'<br />'
+ _(u'<small>You can view and manage the uploaded package using <a href="%s" target="_blank" title="Procomún Home">Procomún</a>\\\'s web page.</small>').replace('>',' style="font-size:1em">') % ProcomunOauth.BASE_URL
+ '\''
),
title=_(u'Publishing document to Procomún')
)
else:
client.alert(
js(
'\'<h3>'
+ _(u'Error exporting package "%s" to Procomún.') % self.package.name
+ u'</h3><br />'
+ _(u'The most common reasons for this are:')
+ u'<br />'
+ _(u'1. Package metadata is not properly filled.')
+ u'<br />'
+ _(u'2. There is a problem with your connection (or with Procomún servers), so you should just try again later.')
+ u'<br /><br />'
+ _(u'If you have problems publishing you can close this dialogue, export as SCORM 2004 and upload the generated zip file manually to Procomún.')
+ u'<br /><br />'
+ _(u'The reported error we got from Procomún was: <pre>%s</pre>') % parsedResult['message']
+ '\''
),
title=_(u'Publishing document to Procomún')
)
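# Run the SCORM export in a worker thread and chain the Procomún upload onto it,
# keeping the UI responsive while both steps complete.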
d = threads.deferToThread(exportScorm)
d.addCallback(lambda filename: threads.deferToThread(publish, filename))
def handleExport(self, client, exportType, filename):
"""
Called by js.
Exports the current package to one of the above formats
'exportType' can be one of 'singlePage' 'webSite' 'zipFile'
'textFile' or 'scorm'
'filename' is a file for scorm pages, and a directory for websites
"""
webDir = Path(self.config.webDir)
#stylesDir = webDir.joinpath('style', self.package.style)
stylesDir = self.config.stylesDir / self.package.style
filename = Path(filename, 'utf-8')
exportDir = Path(filename).dirname()
if exportDir and not exportDir.exists():
client.alert(_(u'Cannot access directory named ') +
unicode(exportDir) +
_(u'. Please use ASCII names.'))
return
name = str(filename.basename().splitext()[0])
if name.upper() in forbiddenPageNames:
client.alert(_('SAVE FAILED!\n"%s" is not a valid name for the file') % str(name))
return
"""
adding the print feature in, using the same export functionality:
"""
if exportType == 'singlePage' or exportType == 'printSinglePage':
printit = 0
if exportType == 'printSinglePage':
printit = 1
exported_dir = self.exportSinglePage(client, filename, webDir, \
stylesDir, printit)
if printit == 1 and exported_dir is not None:
web_printdir = self.get_printdir_relative2web(exported_dir)
G.application.config.browser.open(web_printdir)
elif exportType == 'webSite':
self.exportWebSite(client, filename, stylesDir)
elif exportType == 'csvReport':
self.exportReport(client, filename, stylesDir)
elif exportType == 'zipFile':
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportWebZip(client, filename, stylesDir)
elif exportType == 'textFile':
self.exportText(client, filename)
elif exportType == 'scorm1.2':
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "scorm1.2")
elif exportType == "scorm2004":
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "scorm2004")
elif exportType == "agrega":
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "agrega")
elif exportType == 'epub3':
filename = self.b4save(client, filename, '.epub', _(u'EXPORT FAILED!'))
self.exportEpub3(client, filename, stylesDir)
elif exportType == "commoncartridge":
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportScorm(client, filename, stylesDir, "commoncartridge")
elif exportType == 'mxml':
self.exportXML(client, filename, stylesDir)
else:
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
self.exportIMS(client, filename, stylesDir)
def handleQuit(self, client):
"""
Stops the server
"""
# first, go ahead and clear out any temp job files still in
# the temporary print directory:
log_dir_warnings = 0
# don't warn of any issues with the directories at quit,
# since already warned at initial directory creation
(parent_temp_print_dir, dir_warnings) = \
self.ClearParentTempPrintDirs(client, log_dir_warnings)
client.close("window.location = \"quit\";")
if len(self.clientHandleFactory.clientHandles) <= 1:
self.webServer.monitoring = False
G.application.config.configParser.set('user', 'lastDir', G.application.config.lastDir)
try:
shutil.rmtree(G.application.tempWebDir, True)
shutil.rmtree(G.application.resourceDir, True)
except:
log.debug('Could not delete temp directories.')
reactor.callLater(2, reactor.stop)
else:
log.debug("Not quiting. %d clients alive." % len(self.clientHandleFactory.clientHandles))
def handleSaveEXeUIversion(self,client,status):
initial=G.application.config.configParser.get('user', 'eXeUIversion')
if initial == '2':
client.call(u'eXe.app.getController("Toolbar").exeUIalert')
G.application.config.configParser.set('user', 'eXeUIversion', status)
client.call(u'eXe.app.getController("Toolbar").eXeUIversionSetStatus', status)
def handleIsExeUIAdvanced(self,client):
status=G.application.config.configParser.get('user', 'eXeUIversion')
client.call(u'eXe.app.getController("Toolbar").exeUIsetInitialStatus', status)
def handleBrowseURL(self, client, url):
"""
visit the specified URL using the system browser
if the URL contains %s, substitute the local webDir
"""
url = url.replace('%s', self.config.webDir)
log.debug(u'browseURL: ' + url)
if hasattr(os, 'startfile'):
os.startfile(url)
else:
G.application.config.browser.open(url, new=True)
def handleMergeXliffPackage(self, client, filename, from_source):
"""
Parse the XLIFF file and import the contents based on
translation-unit id-s
"""
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
from_source = True if from_source == "true" else False
try:
importer = XliffImport(self.package, unquote(filename).encode(encoding))
importer.parseAndImport(from_source)
client.alert(_(u'Correct XLIFF import'), (u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=otherSessionPackageClients)
except Exception, e:
client.alert(_(u'Error importing XLIFF: %s') % e, (u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=otherSessionPackageClients)
def handleInsertPackage(self, client, filename):
"""
Load the package and insert in current node
"""
# For templates, we need to set isChanged to True to prevent the
# translation mechanism to execute
if not self.package.isChanged and self.package.isTemplate:
self.package.isChanged = True
package = self._loadPackage(client, filename, newLoad=True, preventUpdateRecent=True)
tmpfile = Path(tempfile.mktemp())
package.save(tmpfile, preventUpdateRecent=True)
loadedPackage = self._loadPackage(client, tmpfile, newLoad=False, destinationPackage=self.package, preventUpdateRecent=True)
newNode = loadedPackage.root.copyToPackage(self.package,
self.package.currentNode)
# trigger a rename of all of the internal nodes and links,
# and to add any such anchors into the dest package via isMerge:
newNode.RenamedNodePath(isMerge=True)
try:
tmpfile.remove()
except:
pass
client.sendScript((u'eXe.app.gotoUrl("/%s")' % \
self.package.name).encode('utf8'), filter_func=allSessionPackageClients)
def handleExtractPackage(self, client, filename, existOk):
"""
Create a new package consisting of the current node and export
'existOk' means the user has been informed of existence and ok'd it
"""
filename = Path(filename, 'utf-8')
saveDir = filename.dirname()
if saveDir and not saveDir.exists():
client.alert(_(u'Cannot access directory named ') + unicode(saveDir) + _(u'. Please use ASCII names.'))
return
# Add the extension if its not already there
if not filename.lower().endswith('.elp'):
filename += '.elp'
if Path(filename).exists() and existOk != 'true':
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXTRACT FAILED!\n%s') % msg)
return
try:
# Create a new package for the extracted nodes
newPackage = self.package.extractNode()
# trigger a rename of all of the internal nodes and links,
# and to remove any old anchors from the dest package,
# and remove any zombie links via isExtract:
newNode = newPackage.root
if newNode:
newNode.RenamedNodePath(isExtract=True)
# Save the new package
newPackage.save(filename)
except Exception, e:
client.alert(_('EXTRACT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Package extracted to: %s') % filename)
def handleCreateDir(self, client, currentDir, newDir):
try:
d = Path(currentDir, 'utf-8') / newDir
d.makedirs()
client.sendScript(u"""eXe.app.getStore('filepicker.DirectoryTree').load({
callback: function() {
eXe.app.fireEvent( "dirchange", %s );
}
})""" % json.dumps(d))
except OSError:
client.alert(_(u"Directory exists"))
except:
log.exception("")
# Public Methods
"""
Exports to Ustad Mobile XML
"""
def exportXML(self, client, filename, stylesDir):
try:
xmlExport = XMLExport(self.config, stylesDir, filename)
xmlExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
def exportSinglePage(self, client, filename, webDir, stylesDir, \
printFlag):
"""
Export the current package to a single web page,
'webDir' is just read from config.webDir
'stylesDir' is where to copy the style sheet information from
'printFlag' indicates whether or not this is for print
(and whatever else that might mean)
"""
try:
imagesDir = webDir.joinpath('images')
scriptsDir = webDir.joinpath('scripts')
cssDir = webDir.joinpath('css')
templatesDir = webDir.joinpath('templates')
# filename is a directory where we will export the website to
# We assume that the user knows what they are doing
# and don't check if the directory is already full or not
# and we just overwrite what's already there
filename = Path(filename)
# Append the package name to the folder path if necessary
if filename.basename() != self.package.name:
filename /= self.package.name
if not filename.exists():
filename.makedirs()
elif not filename.isdir():
client.alert(_(u'Filename %s is a file, cannot replace it') %
filename)
log.error("Couldn't export web page: " +
"Filename %s is a file, cannot replace it" % filename)
return
else:
client.alert(_(u'Folder name %s already exists. '
'Please choose another one or delete existing one then try again.') % filename)
return
# Now do the export
singlePageExport = SinglePageExport(stylesDir, filename, \
imagesDir, scriptsDir, cssDir, templatesDir)
singlePageExport.export(self.package, printFlag)
has_uncut_resources = False
if G.application.config.cutFileName == "1":
has_uncut_resources = singlePageExport.hasUncutResources()
except Exception, e:
client.alert(_('SAVE FAILED!\n%s') % str(e))
raise
# Show the newly exported web site in a new window
if not printFlag:
self._startFile(filename)
if client:
if not has_uncut_resources:
client.alert(_(u'Exported to %s') % filename)
else:
client.alert(_(u'Exported to %s.\nThere were some resources that couldn\'t be renamed to be compatible with ISO9660.') % filename)
# and return a string of the actual directory name,
# in case the package name was added, etc.:
return filename.abspath().encode('utf-8')
# WARNING: the above only returns the RELATIVE pathname
def exportWebSite(self, client, filename, stylesDir):
"""
Export the current package to a web site,
'filename' is the directory to export the site into,
'stylesDir' is where to copy the style sheet information from
"""
try:
# filename is a directory where we will export the website to
# We assume that the user knows what they are doing
# and don't check if the directory is already full or not
# and we just overwrite what's already there
filename = Path(filename)
# Append the package name to the folder path if necessary
if filename.basename() != self.package.name:
filename /= self.package.name
if not filename.exists():
filename.makedirs()
elif not filename.isdir():
if client:
client.alert(_(u'Filename %s is a file, cannot replace it') %
filename)
log.error("Couldn't export web page: " +
"Filename %s is a file, cannot replace it" % filename)
return
else:
if client:
client.alert(_(u'Folder name %s already exists. '
'Please choose another one or delete existing one then try again.') % filename)
return
# Now do the export
websiteExport = WebsiteExport(self.config, stylesDir, filename)
websiteExport.export(self.package)
has_uncut_resources = False
if G.application.config.cutFileName == "1":
has_uncut_resources = websiteExport.hasUncutResources()
except Exception, e:
if client:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
if client:
if not has_uncut_resources:
client.alert(_(u'Exported to %s') % filename)
else:
client.alert(_(u'Exported to %s.\nThere were some resources that couldn\'t be renamed to be compatible with ISO9660.') % filename)
# Show the newly exported web site in a new window
self._startFile(filename)
def exportWebZip(self, client, filename, stylesDir):
try:
log.debug(u"exportWebsite, filename=%s" % filename)
filename = Path(filename)
# Do the export
filename = self.b4save(client, filename, '.zip', _(u'EXPORT FAILED!'))
websiteExport = WebsiteExport(self.config, stylesDir, filename)
websiteExport.exportZip(self.package)
has_uncut_resources = False
if G.application.config.cutFileName == "1":
has_uncut_resources = websiteExport.hasUncutResources()
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
if not has_uncut_resources:
client.alert(_(u'Exported to %s') % filename)
else:
client.alert(_(u'Exported to %s.\nThere were some resources that couldn\'t be renamed to be compatible with ISO9660.') % filename)
def exportText(self, client, filename):
try:
filename = Path(filename)
log.debug(u"exportWebsite, filename=%s" % filename)
# Append an extension if required
if not filename.lower().endswith('.txt'):
filename += '.txt'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
textExport = TextExport(filename)
textExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def handleXliffExport(self, client, filename, source, target, copy, cdata):
"""
Exports this package to a XLIFF file
"""
copy = True if copy == "true" else False
cdata = True if cdata == "true" else False
try:
filename = Path(filename, 'utf-8')
log.debug(u"exportXliff, filename=%s" % filename)
if not filename.lower().endswith('.xlf'):
filename += '.xlf'
name = str(filename.basename().splitext()[0])
if name.upper() in forbiddenPageNames:
client.alert(_('SAVE FAILED!\n"%s" is not a valid name for the file') % str(name))
return
xliffExport = XliffExport(self.config, filename, source, target, copy, cdata)
xliffExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportScorm(self, client, filename, stylesDir, scormType):
"""
Exports this package to a scorm package file
"""
try:
filename = Path(filename)
log.debug(u"exportScorm, filename=%s" % filename)
# Append an extension if required
if not filename.lower().endswith('.zip'):
filename += '.zip'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
scormExport = ScormExport(self.config, stylesDir, filename, scormType)
modifiedMetaData = scormExport.export(self.package)
has_uncut_resources = False
if G.application.config.cutFileName == "1":
has_uncut_resources = scormExport.hasUncutResources()
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
if modifiedMetaData != False and modifiedMetaData['modifiedMetaData']:
client.alert(_(u'The following fields have been cut to meet the SCORM 1.2 standard: %s') % ', '.join(modifiedMetaData['fieldsModified']))
else:
if not has_uncut_resources:
client.alert(_(u'Exported to %s') % filename)
else:
client.alert(_(u'Exported to %s.\nThere were some resources that couldn\'t be renamed to be compatible with ISO9660.') % filename)
def exportEpub3(self, client, filename, stylesDir):
try:
log.debug(u"exportEpub3, filename=%s" % filename)
filename = Path(filename)
# Do the export
filename = self.b4save(client, filename, '.epub', _(u'EXPORT FAILED!'))
epub3Export = Epub3Export(self.config, stylesDir, filename)
epub3Export.export(self.package)
# epub3Export.exportZip(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportReport(self, client, filename, stylesDir):
"""
Generates this package report to a file
"""
try:
log.debug(u"exportReport")
# Append an extension if required
if not filename.lower().endswith('.csv'):
filename += '.csv'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
websiteExport = WebsiteExport(self.config, stylesDir, filename, report=True)
websiteExport.export(self.package)
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
client.alert(_(u'Exported to %s') % filename)
def exportIMS(self, client, filename, stylesDir):
"""
Exports this package to a ims package file
"""
try:
log.debug(u"exportIMS")
# Append an extension if required
if not filename.lower().endswith('.zip'):
filename += '.zip'
if Path(filename).exists():
msg = _(u'"%s" already exists.\nPlease try again with a different filename') % filename
client.alert(_(u'EXPORT FAILED!\n%s') % msg)
return
# Do the export
imsExport = IMSExport(self.config, stylesDir, filename)
imsExport.export(self.package)
has_uncut_resources = False
if G.application.config.cutFileName == "1":
has_uncut_resources = imsExport.hasUncutResources()
except Exception, e:
client.alert(_('EXPORT FAILED!\n%s') % str(e))
raise
if not has_uncut_resources:
client.alert(_(u'Exported to %s') % filename)
else:
client.alert(_(u'Exported to %s.\nThere were some resources that couldn\'t be renamed to be compatible with ISO9660.') % filename)
# Utility methods
def _startFile(self, filename):
"""
Launches an exported web site or page
"""
if hasattr(os, 'startfile'):
try:
os.startfile(filename)
except UnicodeEncodeError:
os.startfile(filename.encode(Path.fileSystemEncoding))
else:
if (filename / 'index.html').exists():
filename /= 'index.html'
else:
filename /= 'index.htm'
G.application.config.browser.open('file://' + filename)
def _loadPackage(self, client, filename, newLoad=True,
destinationPackage=None, isTemplate=False, preventUpdateRecent=False):
"""Load the package named 'filename'"""
try:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
filename2 = toUnicode(filename, encoding)
log.debug("filename and path" + filename2)
# see if the file exists AND is readable by the user
try:
open(filename2, 'rb').close()
except IOError:
filename2 = toUnicode(filename, 'utf-8')
try:
open(filename2, 'rb').close()
except IOError:
client.alert(_(u'File %s does not exist or is not readable.') % filename2)
return None
if isTemplate == False:
package = Package.load(filename2, newLoad, destinationPackage, preventUpdateRecent=preventUpdateRecent)
else:
package = self.session.packageStore.createPackageFromTemplate(filename)
if package is None:
raise Exception(_("Couldn't load file, please email file to [email protected]"))
except Exception, exc:
if log.getEffectiveLevel() == logging.DEBUG:
client.alert(_(u'Sorry, wrong file format:\n%s') % unicode(exc))
else:
client.alert(_(u'Sorry, wrong file format'))
log.error(u'Error loading package "%s": %s' % (filename2, unicode(exc)))
log.error(u'Traceback:\n%s' % traceback.format_exc())
raise
return package
|
exelearning/iteexe
|
exe/jsui/mainpage.py
|
Python
|
gpl-2.0
| 88,087
|
[
"VisIt"
] |
122842ca3c7b1b4414ba003801a87f571e84a2ca777b9eb8b6877d37ea8334db
|
# -*- coding: utf-8 -*-
# Copyright (C) Brian Moe (2013-2014), Duncan Macleod (2014-)
#
# This file is part of LIGO CIS Core.
#
# LIGO CIS Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LIGO CIS Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LIGO CIS Core. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.utils.decorators import available_attrs
from django.utils.http import urlquote
from django.shortcuts import redirect
import logging
from functools import wraps
class LigoShibbolethMiddleware(RemoteUserMiddleware):
"""Middleware layer for LIGO.ORG Shibboleth authentication
"""
if hasattr(settings, 'SHIB_AUTHENTICATION_IDENTITY_HEADER'):
header = settings.SHIB_AUTHENTICATION_IDENTITY_HEADER
else:
header = 'REMOTE_USER'
def process_request(self, request):
logger = logging.getLogger(
'ligodjangoauth.LigoShibbolethMiddleware.process_request')
logger.debug('invoked')
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The LigoShibbolethMiddlware auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the LigoShibbolethMiddleware class.")
try:
username = request.META[self.header]
logger.debug('found username %s' % username)
except KeyError:
# If specified header doesn't exist then return (leaving
# request.user set to AnonymousUser by the
# AuthenticationMiddleware).
logger.warn('could not find %s' % self.header)
return
# we are not using or relying on Django sessions - always authenticate
user = auth.authenticate(identity=username, request=request)
if user:
# User is valid. Set request.user
request.user = user
class LigoShibbolethAuthBackend(RemoteUserBackend):
"""Backend for LIGO.ORG Shibboleth authentication
"""
if hasattr(settings, 'ADMIN_GROUP_HEADER'):
header = settings.ADMIN_GROUP_HEADER
else:
header = 'isMemberOf'
if hasattr(settings, 'ADMIN_GROUP'):
adminGroup = settings.ADMIN_GROUP
else:
adminGroup = 'Communities:LVC:LVCGroupMembers'
def authenticate(self, identity, request):
"""Authenticate a user
"""
logger = logging.getLogger(
'ligodjangoauth.LigoShibbolethAuthBackend.authenticate')
logger.debug('invoked with identity %s' % identity)
if not identity:
return
# create the user object
user, created = User.objects.get_or_create(username=identity)
# This is different from system version,
# which ALWAYS reconciles permissions.
if created:
# reconcile the authorization in headers with the
# user object attributes, the default is no privileges
user.is_staff = False
user.is_superuser = False
try:
groups = request.META[self.header].split(';')
except KeyError:
pass
else:
if self.adminGroup in groups:
user.is_staff = True
user.is_superuser = True
user.save()
logger.debug('saved user object with identity %s' % identity)
return user
def user_passes_test(
test_func,
login_url=getattr(settings, 'SHIB_AUTHENTICATION_SESSION_INITATOR',
None),
redirect_field_name="target"):
"""Check whether a user can authenticate
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
Parameters
----------
test_func : `callable`
function that returns the authentication state for a given user
login_url : `str`
URL string for login redirect
redirect_field_name : `str`
name of HTTP redirect field
"""
def decorated_method(view_func):
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = urlquote(
request.build_absolute_uri(request.get_full_path()))
return HttpResponseRedirect(
'%s?%s=%s' % (login_url, redirect_field_name, path))
return wraps(
view_func, assigned=available_attrs(view_func))(_wrapped_view)
return decorated_method
def login_required(function=None, redirect_field_name='target'):
"""Check whether a user is logged in
This method is a decorator, checking that a user is logged in,
and redirecting to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def admin_required(f):
"""Decorator for a `View` that requires authentication
Should only be used to decorate view functions that require
authentication and membership in the admin group.
Assumes args[0] passed to the wrapped function f is the Django
request object passed into a view.
"""
@wraps(f)
def wrapper(*args, **kwds):
if hasattr(settings, 'ADMIN_GROUP_HEADER'):
header = settings.ADMIN_GROUP_HEADER
else:
header = 'isMemberOf'
if hasattr(settings, 'ADMIN_GROUP'):
admin_group_name = settings.ADMIN_GROUP
else:
admin_group_name = 'Communities:LVC:LVCGroupMembers'
request = args[0]
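# The Shibboleth SP exposes group membership as a semicolon-separated list in the
# configured header; missing or insufficient groups redirect to the admin-only view.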
user_groups = request.META.get(header, None)
if not user_groups:
return redirect('views.admin_only')
user_groups = user_groups.split(';')
if admin_group_name not in user_groups:
return redirect('views.admin_only')
return f(*args, **kwds)
return wrapper
|
lscsoft/cis.server
|
cisserver/middleware/ligodjangoauth.py
|
Python
|
gpl-3.0
| 7,145
|
[
"Brian",
"MOE"
] |
04ee55f18d5dfec542ec5f85b877fb907c525eddeb955b4dbe5efa7831e655d6
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
_mvn_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
```
or
```
[M1,...,Mm] + self.batch_shape + self.event_shape
```
"""
class _MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[mu] + cov.inputs):
self._mu = array_ops.identity(mu, name="mu")
self._cov = cov
self._validate_args = validate_args # Needed by _assert_valid_mu.
self._mu = self._assert_valid_mu(self._mu)
super(_MultivariateNormalOperatorPD, self).__init__(
dtype=self._mu.dtype,
is_reparameterized=True,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._mu] + cov.inputs,
name=ns)
def _assert_valid_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s" % (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return math_ops.exp(self._cov.log_det())
def _batch_shape(self):
return self._cov.batch_shape()
def _get_batch_shape(self):
return self._cov.get_batch_shape()
def _event_shape(self):
return array_ops.pack([self._cov.vector_space_dimension()])
def _get_event_shape(self):
return self._cov.get_shape()[-1:]
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
shape = array_ops.concat_v2([self._cov.vector_shape(), [n]], 0)
white_samples = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat_v2(
(array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
return samples
@distribution_util.AppendDocstring(_mvn_prob_note)
def _log_prob(self, x):
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
# Compute the term x^T sigma^{-1} x which appears in the exponent of
# the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -0.5 * (self.log_sigma_det() +
k * math.log(2. * math.pi) +
x_whitened_norm)
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
@distribution_util.AppendDocstring(_mvn_prob_note)
def _prob(self, x):
return math_ops.exp(self.log_prob(x))
def _entropy(self):
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def _mean(self):
return array_ops.identity(self._mu)
def _variance(self):
return self.sigma
def _mode(self):
return array_ops.identity(self._mu)
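# Illustrative sketch (not part of the original library): a dense NumPy
# version of the log-density defined in the class docstring above,
#   f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu)),
# for a single observation. The helper name and the use of NumPy are
# assumptions made for illustration only; the OperatorPD-based implementation
# above is the authoritative, batched version.
def _reference_mvn_log_pdf(x, mu, cov):
  """Log-density of a multivariate normal, computed densely with NumPy."""
  import numpy as np  # local import keeps the sketch self-contained
  x = np.asarray(x, dtype=float)
  mu = np.asarray(mu, dtype=float)
  cov = np.asarray(cov, dtype=float)
  k = mu.shape[-1]
  diff = x - mu
  # Mahalanobis term (x - mu)^T C^{-1} (x - mu), via a linear solve.
  maha = diff.dot(np.linalg.solve(cov, diff))
  _, logdet = np.linalg.slogdet(cov)
  return -0.5 * (k * np.log(2.0 * np.pi) + logdet + maha)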
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stdev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and the standard deviations of the (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stdev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stdev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stdev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stdev` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[diag_stdev]) as ns:
cov = operator_pd_diag.OperatorPDSqrtDiag(diag_stdev,
verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusStdDev"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[diag_stdev]) as ns:
super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
mu=mu,
diag_stdev=nn.softplus(diag_stdev),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V is shape (k x r), typically r << k
D is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
V is shape (k x r), typically r << k
D is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[diag_large, v, diag_small]) as ns:
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator_pd_diag.OperatorPDDiag(
diag_large, verify_pd=validate_args),
v,
diag=diag_small,
verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
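# Illustrative sketch (not part of the original library): assembling the dense
# covariance implied by the parameterization documented above,
#   S = M + V D V^T,   C = S S^T,
# with NumPy. The helper name is an assumption for illustration; it only makes
# the roles of `diag_large`, `v` and `diag_small` concrete.
def _reference_vdvt_covariance(diag_large, v, diag_small=None):
  """Return the dense covariance C = (M + V D V^T)(M + V D V^T)^T."""
  import numpy as np
  m = np.diag(np.asarray(diag_large, dtype=float))    # M, shape (k, k)
  v = np.asarray(v, dtype=float)                      # V, shape (k, r)
  if diag_small is None:
    d = np.eye(v.shape[-1])                           # D defaults to identity
  else:
    d = np.diag(np.asarray(diag_small, dtype=float))  # D, shape (r, r)
  s = m + v.dot(d).dot(v.T)                           # S = M + V D V^T
  return s.dot(s.T)                                   # C = S S^T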
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
Trainable (batch) Cholesky matrices can be created with
`tf.contrib.distributions.matrix_diag_transform()`
"""
def __init__(self,
mu,
chol,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[chol]) as ns:
cov = operator_pd_cholesky.OperatorPDCholesky(chol,
verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[sigma]) as ns:
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
@kullback_leibler.RegisterKL(
_MultivariateNormalOperatorPD, _MultivariateNormalOperatorPD)
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
"""Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.
With `X`, `Y` both multivariate normals in `R^k` with means `mu_x`, `mu_y` and
covariance `C_x`, `C_y` respectively,
```
KL(X || Y) = 0.5 * ( T + Q - k + L ),
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
L := Log[Det(C_b)] - Log[Det(C_a)]
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
name: (optional) name to use for created ops. Default "kl_mvn_mvn".
Returns:
Batchwise `KL(mvn_a || mvn_b)`.
"""
# Access the "private" OperatorPD that each mvn is built from.
cov_a = mvn_a._cov # pylint: disable=protected-access
cov_b = mvn_b._cov # pylint: disable=protected-access
mu_a = mvn_a.mu
mu_b = mvn_b.mu
inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
with ops.name_scope(name, "kl_mvn_mvn", inputs):
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ik} (inv(B) A)_{ik}^2
# The second equality follows from the cyclic permutation property.
b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
t = math_ops.reduce_sum(
math_ops.square(b_inv_a),
reduction_indices=[-1, -2])
q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
return 0.5 * (t + q - k) + one_half_l
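# Illustrative sketch (not part of the original library): the closed-form KL
# divergence from the docstring above, KL = 0.5 * (T + Q - k + L), computed
# densely with NumPy for a single (non-batched) pair of Gaussians. The helper
# name is an assumption; the OperatorPD-based Op above is the efficient
# batched implementation.
def _reference_kl_mvn_mvn(mu_a, cov_a, mu_b, cov_b):
  """KL( N(mu_a, cov_a) || N(mu_b, cov_b) ) for dense covariance matrices."""
  import numpy as np
  mu_a = np.asarray(mu_a, dtype=float)
  mu_b = np.asarray(mu_b, dtype=float)
  cov_a = np.asarray(cov_a, dtype=float)
  cov_b = np.asarray(cov_b, dtype=float)
  k = mu_a.shape[-1]
  cov_b_inv = np.linalg.inv(cov_b)
  t = np.trace(cov_b_inv.dot(cov_a))                             # T
  diff = mu_b - mu_a
  q = diff.dot(cov_b_inv).dot(diff)                              # Q
  l = np.linalg.slogdet(cov_b)[1] - np.linalg.slogdet(cov_a)[1]  # L
  return 0.5 * (t + q - k + l)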
|
ppries/tensorflow
|
tensorflow/contrib/distributions/python/ops/mvn.py
|
Python
|
apache-2.0
| 28,684
|
[
"Gaussian"
] |
9f36ec0b1260ab7401a33563277fd2b3f795af1347448a8a7cc47f2b53bef98a
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Christian Schwantes
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
from mdtraj.utils import ensure_type
from mdtraj.utils.six import string_types
from mdtraj.utils.six.moves import xrange
from mdtraj.core import element
import mdtraj as md
import itertools
__all__ = ['compute_contacts', 'squareform']
##############################################################################
# Code
##############################################################################
def compute_contacts(traj, contacts='all', scheme='closest-heavy', ignore_nonprotein=True, periodic=True,
soft_min=False, soft_min_beta=20):
"""Compute the distance between pairs of residues in a trajectory.
Parameters
----------
traj : md.Trajectory
An mdtraj trajectory. It must contain topology information.
contacts : array-like, ndim=2 or 'all'
An array containing pairs of indices (0-indexed) of residues to
compute the contacts between, or 'all'. The string 'all' will
select all pairs of residues separated by two or more residues
(i.e. the i to i+1 and i to i+2 pairs will be excluded).
scheme : {'ca', 'closest', 'closest-heavy', 'sidechain', 'sidechain-heavy'}
scheme to determine the distance between two residues:
'ca' : distance between two residues is given by the distance
between their alpha carbons
'closest' : distance is the closest distance between any
two atoms in the residues
'closest-heavy' : distance is the closest distance between
any two non-hydrogen atoms in the residues
'sidechain' : distance is the closest distance between any
two atoms in residue sidechains
'sidechain-heavy' : distance is the closest distance between
any two non-hydrogen atoms in residue sidechains
ignore_nonprotein : bool
When using `contacts=='all'`, don't compute contacts between
"residues" which are not protein (i.e. do not contain an alpha
carbon).
periodic : bool, default=True
If periodic is True and the trajectory contains unitcell information,
we will compute distances under the minimum image convention.
soft_min : bool, default=False
If soft_min is true, we will use a differentiable version of
the scheme. The exact expression used
is d = \frac{\beta}{\log\sum_i{\exp(\frac{\beta}{d_i})}} where
beta is a user parameter which defaults to 20nm. The expression
we use is copied from the plumed mindist calculator.
http://plumed.github.io/doc-v2.0/user-doc/html/mindist.html
soft_min_beta : float, default=20nm
The value of beta to use for the soft_min distance option.
Very large values might cause small contact distances to go to 0.
Returns
-------
distances : np.ndarray, shape=(n_frames, n_pairs), dtype=np.float32
Distances for each residue-residue contact in each frame
of the trajectory
residue_pairs : np.ndarray, shape=(n_pairs, 2), dtype=int
Each row of this return value gives the indices of the residues
involved in the contact. This argument mirrors the `contacts` input
parameter. When `all` is specified as input, this return value
gives the actual residue pairs resolved from `all`. Furthermore,
when scheme=='ca', any contact pair supplied as input corresponding
to a residue without an alpha carbon (e.g. HOH) is ignored from the
input contacts list, meaning that the indexing of the
output `distances` may not match up with the indexing of the input
`contacts`. But the indexing of `distances` *will* match up with
the indexing of `residue_pairs`
Examples
--------
>>> # To compute the contact distance between residue 0 and 10 and
>>> # residues 0 and 11
>>> md.compute_contacts(t, [[0, 10], [0, 11]])
>>> # the itertools library can be useful to generate the arrays of indices
>>> group_1 = [0, 1, 2]
>>> group_2 = [10, 11]
>>> pairs = list(itertools.product(group_1, group_2))
>>> print(pairs)
[(0, 10), (0, 11), (1, 10), (1, 11), (2, 10), (2, 11)]
>>> md.compute_contacts(t, pairs)
See Also
--------
mdtraj.geometry.squareform : turn the result from this function
into a square "contact map"
Topology.residue : Get residues from the topology by index
"""
if traj.topology is None:
raise ValueError('contact calculation requires a topology')
if isinstance(contacts, string_types):
if contacts.lower() != 'all':
raise ValueError('(%s) is not a valid contacts specifier' % contacts.lower())
residue_pairs = []
for i in xrange(traj.n_residues):
residue_i = traj.topology.residue(i)
if ignore_nonprotein and not any(a for a in residue_i.atoms if a.name.lower() == 'ca'):
continue
for j in xrange(i+3, traj.n_residues):
residue_j = traj.topology.residue(j)
if ignore_nonprotein and not any(a for a in residue_j.atoms if a.name.lower() == 'ca'):
continue
if residue_i.chain == residue_j.chain:
residue_pairs.append((i, j))
residue_pairs = np.array(residue_pairs)
if len(residue_pairs) == 0:
raise ValueError('No acceptable residue pairs found')
else:
residue_pairs = ensure_type(np.asarray(contacts), dtype=int, ndim=2, name='contacts',
shape=(None, 2), warn_on_cast=False)
if not np.all((residue_pairs >= 0) * (residue_pairs < traj.n_residues)):
raise ValueError('contacts requests a residue that is not in the permitted range')
# now the bulk of the function. This will calculate atom distances and then
# re-work them in the required scheme to get residue distances
scheme = scheme.lower()
if scheme not in ['ca', 'closest', 'closest-heavy', 'sidechain', 'sidechain-heavy']:
raise ValueError('scheme must be one of [ca, closest, closest-heavy, sidechain, sidechain-heavy]')
if scheme == 'ca':
if soft_min:
import warnings
warnings.warn("The soft_min=True option with scheme=ca gives"
"the same results as soft_min=False")
filtered_residue_pairs = []
atom_pairs = []
for r0, r1 in residue_pairs:
ca_atoms_0 = [a.index for a in traj.top.residue(r0).atoms if a.name.lower() == 'ca']
ca_atoms_1 = [a.index for a in traj.top.residue(r1).atoms if a.name.lower() == 'ca']
if len(ca_atoms_0) == 1 and len(ca_atoms_1) == 1:
atom_pairs.append((ca_atoms_0[0], ca_atoms_1[0]))
filtered_residue_pairs.append((r0, r1))
elif len(ca_atoms_0) == 0 or len(ca_atoms_1) == 0:
# residue does not contain a CA atom, skip it
if contacts != 'all':
# if the user manually asked for this residue, and didn't use "all"
import warnings
warnings.warn('Ignoring contacts pair %d-%d. No alpha carbon.' % (r0, r1))
else:
raise ValueError('More than 1 alpha carbon detected in residue %d or %d' % (r0, r1))
residue_pairs = np.array(filtered_residue_pairs)
distances = md.compute_distances(traj, atom_pairs, periodic=periodic)
elif scheme in ['closest', 'closest-heavy', 'sidechain', 'sidechain-heavy']:
if scheme == 'closest':
residue_membership = [[atom.index for atom in residue.atoms]
for residue in traj.topology.residues]
elif scheme == 'closest-heavy':
# then remove the hydrogens from the above list
residue_membership = [[atom.index for atom in residue.atoms if not (atom.element == element.hydrogen)]
for residue in traj.topology.residues]
elif scheme == 'sidechain':
residue_membership = [[atom.index for atom in residue.atoms if atom.is_sidechain]
for residue in traj.topology.residues]
elif scheme == 'sidechain-heavy':
# then remove the hydrogens from the above list
if 'GLY' in [residue.name for residue in traj.topology.residues]:
import warnings
warnings.warn('selected topology includes at least one glycine residue, which has no heavy atoms in its sidechain. The distances involving glycine residues '
'will be computed using the sidechain hydrogen instead.')
residue_membership = [[atom.index for atom in residue.atoms if atom.is_sidechain and not (atom.element == element.hydrogen)] if not residue.name == 'GLY'
else [atom.index for atom in residue.atoms if atom.is_sidechain]
for residue in traj.topology.residues]
residue_lens = [len(ainds) for ainds in residue_membership]
atom_pairs = []
n_atom_pairs_per_residue_pair = []
for pair in residue_pairs:
atom_pairs.extend(list(itertools.product(residue_membership[pair[0]], residue_membership[pair[1]])))
n_atom_pairs_per_residue_pair.append(residue_lens[pair[0]] * residue_lens[pair[1]])
atom_distances = md.compute_distances(traj, atom_pairs, periodic=periodic)
# now squash the results based on residue membership
n_residue_pairs = len(residue_pairs)
distances = np.zeros((len(traj), n_residue_pairs), dtype=np.float32)
n_atom_pairs_per_residue_pair = np.asarray(n_atom_pairs_per_residue_pair)
for i in xrange(n_residue_pairs):
index = int(np.sum(n_atom_pairs_per_residue_pair[:i]))
n = n_atom_pairs_per_residue_pair[i]
if not soft_min:
distances[:, i] = atom_distances[:, index : index + n].min(axis=1)
else:
distances[:, i] = soft_min_beta / \
np.log(np.sum(np.exp(soft_min_beta/
atom_distances[:, index : index + n]), axis=1))
else:
raise ValueError('This is not supposed to happen!')
return distances, residue_pairs
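# Illustrative sketch (not part of the original library): the soft-min
# expression from the docstring above, d = beta / log(sum_i exp(beta / d_i)),
# applied to a 1-D array of atom-atom distances. The helper name is an
# assumption; compute_contacts applies the same expression per frame and per
# residue pair when soft_min=True.
def _reference_soft_min(atom_distances, beta=20.0):
    """Differentiable stand-in for min(atom_distances)."""
    atom_distances = np.asarray(atom_distances, dtype=float)
    return beta / np.log(np.sum(np.exp(beta / atom_distances)))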
def squareform(distances, residue_pairs):
"""Reshape the contact distance to square contact maps
Parameters
----------
distances : np.ndarray, shape=(n_frames, n_pairs)
Distances between pairs of residues, as computed by
`mdtraj.geometry.compute_contacts`.
residue_pairs : np.ndarray, shape=(n_pairs, 2)
The indices of the residues involved in each pair, as
returned by `mdtraj.geometry.compute_contacts`
Returns
-------
contact_maps : np.ndarray, shape=(n_frames, n_residues, n_residues)
Reshaped version of `distances`, such that the distance, in
the `k`th frame of the trajectory from residue `i` to residue `j`
is given by `contact_maps[k, i, j]`. All entries in `contact_maps`
corresponding to the distance between residues that were not
part of residue_pairs are 0.0.
See Also
--------
mdtraj.compute_contacts : Compute the array of contact distances
"""
if not (isinstance(distances, np.ndarray) and distances.ndim == 2):
raise ValueError('distances must be a 2d array')
residue_pairs = ensure_type(
residue_pairs, dtype=int, ndim=2, name='residue_pairs',
shape=(None, 2), warn_on_cast=False)
if not np.all(residue_pairs >= 0):
raise ValueError('residue_pairs references a residue that is not in '
'the permitted range')
if distances.shape[1] != residue_pairs.shape[0]:
raise ValueError('The number of pairs in distances, %d, does not '
'match the number of pairs in residue_pairs, %d.' %
(distances.shape[1], residue_pairs.shape[0]))
n_residues = np.max(residue_pairs) + 1
contact_maps = np.zeros((distances.shape[0], n_residues, n_residues), dtype=distances.dtype)
contact_maps[:, residue_pairs[:, 0], residue_pairs[:, 1]] = distances
contact_maps[:, residue_pairs[:, 1], residue_pairs[:, 0]] = distances
return contact_maps
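# Illustrative usage sketch (not part of the original library), assuming a
# trajectory `t` has already been loaded, e.g. with md.load(...):
#
#     distances, residue_pairs = compute_contacts(t, contacts='all',
#                                                 scheme='closest-heavy')
#     maps = squareform(distances, residue_pairs)
#     # maps[k, i, j] is the contact distance between residues i and j in
#     # frame k; entries for pairs not in residue_pairs remain 0.0.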
|
rmcgibbo/mdtraj
|
mdtraj/geometry/contact.py
|
Python
|
lgpl-2.1
| 13,688
|
[
"MDTraj"
] |
063e3813afa745638247f97d1de8ed07e155b9ac6fe1f6b063fb19bc53afa2b6
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
if type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
porto_frame = pd.read_csv(filename,na_values=-1)
target_labels = list(porto_frame['target'].values)
del porto_frame['id']
del porto_frame['target']
imputer = Imputer()
imputed_porto_frame = imputer.fit_transform(porto_frame.values)
X_train,X_test,y_train,y_test = train_test_split(imputed_porto_frame,target_labels,test_size=0.1,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
|
rupakc/Kaggle-Compendium
|
Porto Seguro Safe Driver Prediction/porto-baseline.py
|
Python
|
mit
| 2,887
|
[
"Gaussian"
] |
9514f5cd0e7eef03b98fd1aed62d8f2e6622def811d21e404e92edc6925a5746
|
# -*- coding: utf-8 -*-
"""
pysteps.nowcasts.sprog
======================
Implementation of the S-PROG method described in :cite:`Seed2003`
.. autosummary::
:toctree: ../generated/
forecast
"""
import numpy as np
import time
from pysteps import cascade
from pysteps import extrapolation
from pysteps import utils
from pysteps.nowcasts import utils as nowcast_utils
from pysteps.postprocessing import probmatching
from pysteps.timeseries import autoregression, correlation
try:
import dask
DASK_IMPORTED = True
except ImportError:
DASK_IMPORTED = False
def forecast(
R,
V,
timesteps,
n_cascade_levels=6,
R_thr=None,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
ar_order=2,
conditional=False,
probmatching_method="cdf",
num_workers=1,
fft_method="numpy",
domain="spatial",
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the Spectral Prognosis (S-PROG) method.
Parameters
----------
R: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between
the inputs are assumed to be regular.
V: array-like
Array of shape (2,m,n) containing the x- and y-components of the
advection field.
The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements of
the list are required to be in ascending order.
n_cascade_levels: int, optional
The number of cascade levels to use.
R_thr: float
The threshold value for minimum observable precipitation intensity.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}, optional
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}, optional
Name of the bandpass filter method to use with the cascade decomposition.
See the documentation of pysteps.cascade.interface.
ar_order: int, optional
The order of the autoregressive model to use. Must be >= 1.
conditional: bool, optional
If set to True, compute the statistics of the precipitation field
conditionally by excluding pixels where the values are
below the threshold R_thr.
probmatching_method: {'cdf','mean',None}, optional
Method for matching the conditional statistics of the forecast field
(areas with precipitation intensity above the threshold R_thr) with those
of the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, 'mean'=adjust only the mean value,
None=no matching applied.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if dask
is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
domain: {"spatial", "spectral"}
If "spatial", all computations are done in the spatial domain (the
classical S-PROG model). If "spectral", the AR(2) models are applied
directly in the spectral domain to reduce memory footprint and improve
performance :cite:`PCH2019a`.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input precipitation fields
R. If measure_time is True, the return value is a three-element tuple
containing the nowcast array, the initialization time of the nowcast
generator and the time used in the main loop (seconds).
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface
References
----------
:cite:`Seed2003`, :cite:`PCH2019a`
"""
_check_inputs(R, V, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
if filter_kwargs is None:
filter_kwargs = dict()
if np.any(~np.isfinite(V)):
raise ValueError("V contains non-finite values")
print("Computing S-PROG nowcast:")
print("-------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (R.shape[1], R.shape[2]))
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("bandpass filter: %s" % bandpass_filter_method)
print("decomposition: %s" % decomp_method)
print("conditional statistics: %s" % ("yes" if conditional else "no"))
print("probability matching: %s" % probmatching_method)
print("FFT method: %s" % fft_method)
print("domain: %s" % domain)
print("")
print("Parameters:")
print("-----------")
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("parallel threads: %d" % num_workers)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the AR(p) model: %d" % ar_order)
print("precip. intensity threshold: %g" % R_thr)
if measure_time:
starttime_init = time.time()
fft = utils.get_method(fft_method, shape=R.shape[1:], n_threads=num_workers)
M, N = R.shape[1:]
# initialize the band-pass filter
filter_method = cascade.get_method(bandpass_filter_method)
filter = filter_method((M, N), n_cascade_levels, **filter_kwargs)
decomp_method, recomp_method = cascade.get_method(decomp_method)
extrapolator_method = extrapolation.get_method(extrap_method)
R = R[-(ar_order + 1) :, :, :].copy()
R_min = np.nanmin(R)
# determine the domain mask from non-finite values
domain_mask = np.logical_or.reduce(
[~np.isfinite(R[i, :]) for i in range(R.shape[0])]
)
# determine the precipitation threshold mask
if conditional:
MASK_thr = np.logical_and.reduce(
[R[i, :, :] >= R_thr for i in range(R.shape[0])]
)
else:
MASK_thr = None
# initialize the extrapolator
x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1]))
xy_coords = np.stack([x_values, y_values])
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = True
# advect the previous precipitation fields to the same position with the
# most recent one (i.e. transform them into the Lagrangian coordinates)
res = list()
def f(R, i):
return extrapolator_method(R[i, :], V, ar_order - i, "min", **extrap_kwargs)[-1]
for i in range(ar_order):
if not DASK_IMPORTED:
R[i, :, :] = f(R, i)
else:
res.append(dask.delayed(f)(R, i))
if DASK_IMPORTED:
num_workers_ = len(res) if num_workers > len(res) else num_workers
R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])
# replace non-finite values with the minimum value
R = R.copy()
for i in range(R.shape[0]):
R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :])
# compute the cascade decompositions of the input precipitation fields
R_d = []
for i in range(ar_order + 1):
R_ = decomp_method(
R[i, :, :],
filter,
mask=MASK_thr,
fft_method=fft,
output_domain=domain,
normalize=True,
compute_stats=True,
compact_output=True,
)
R_d.append(R_)
# rearrange the cascade levels into a four-dimensional array of shape
# (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
R_c = nowcast_utils.stack_cascades(
R_d, n_cascade_levels, convert_to_full_arrays=True
)
# compute lag-l temporal autocorrelation coefficients for each cascade level
GAMMA = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
if domain == "spatial":
GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i], mask=MASK_thr)
else:
GAMMA[i, :] = correlation.temporal_autocorrelation(
R_c[i], domain="spectral", x_shape=R.shape[1:]
)
R_c = nowcast_utils.stack_cascades(
R_d, n_cascade_levels, convert_to_full_arrays=False
)
R_d = R_d[-1]
nowcast_utils.print_corrcoefs(GAMMA)
if ar_order == 2:
# adjust the lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(GAMMA[i, 0], GAMMA[i, 1])
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
PHI = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
nowcast_utils.print_ar_params(PHI)
# discard all except the p-1 last cascades because they are not needed for
# the AR(p) model
R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)]
if probmatching_method == "mean":
mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr])
# compute precipitation mask and wet area ratio
MASK_p = R[-1, :, :] >= R_thr
war = 1.0 * np.sum(MASK_p) / (R.shape[1] * R.shape[2])
if measure_time:
init_time = time.time() - starttime_init
R = R[-1, :, :]
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
R_f = []
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
R_f_prev = R
extrap_kwargs["return_displacement"] = True
D = None
t_prev = 0.0
# iterate each time step
for t, subtimestep_idx in enumerate(timesteps):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in subtimestep_idx]
else:
subtimesteps = [t]
if (timestep_type == "list" and subtimesteps) or (
timestep_type == "int" and t > 0
):
is_nowcast_time_step = True
else:
is_nowcast_time_step = False
if is_nowcast_time_step:
print(
"Computing nowcast for time step %d... " % t,
end="",
flush=True,
)
if measure_time:
starttime = time.time()
for i in range(n_cascade_levels):
R_c[i] = autoregression.iterate_ar_model(R_c[i], PHI[i, :])
R_d["cascade_levels"] = [R_c[i][-1, :] for i in range(n_cascade_levels)]
if domain == "spatial":
R_d["cascade_levels"] = np.stack(R_d["cascade_levels"])
R_f_new = recomp_method(R_d)
if domain == "spectral":
R_f_new = fft.irfft2(R_f_new)
MASK = _compute_sprog_mask(R_f_new, war)
R_f_new[~MASK] = R_min
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
elif probmatching_method == "mean":
mu_fct = np.mean(R_f_new[MASK])
R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0
R_f_new[domain_mask] = np.nan
# advect the recomposed precipitation field to obtain the forecast for
# the current time step (or subtimesteps if non-integer time steps are
# given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
R_f_ip = (
1.0 - t_diff_prev_int
) * R_f_prev + t_diff_prev_int * R_f_new
else:
R_f_ip = R_f_prev
t_diff_prev = t_sub - t_prev
extrap_kwargs["displacement_prev"] = D
R_f_ep, D = extrapolator_method(
R_f_ip,
V,
[t_diff_prev],
**extrap_kwargs,
)
R_f.append(R_f_ep[0])
t_prev = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if not subtimesteps:
t_diff_prev = t + 1 - t_prev
extrap_kwargs["displacement_prev"] = D
_, D = extrapolator_method(
None,
V,
[t_diff_prev],
**extrap_kwargs,
)
t_prev = t + 1
R_f_prev = R_f_new
if is_nowcast_time_step:
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if measure_time:
mainloop_time = time.time() - starttime_mainloop
R_f = np.stack(R_f)
if measure_time:
return R_f, init_time, mainloop_time
else:
return R_f
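# Illustrative sketch (not part of the original library): the Yule-Walker
# solution for a scalar AR(2) model, i.e. the kind of relation that
# autoregression.estimate_ar_params_yw solves per cascade level in the
# forecast above. The helper name is an assumption; pysteps' own routine also
# handles the innovation term and arbitrary order p.
def _reference_yule_walker_ar2(gamma_1, gamma_2):
    """Return (phi_1, phi_2) of an AR(2) model from lag-1/lag-2 correlations."""
    phi_1 = gamma_1 * (1.0 - gamma_2) / (1.0 - gamma_1 ** 2)
    phi_2 = (gamma_2 - gamma_1 ** 2) / (1.0 - gamma_1 ** 2)
    return phi_1, phi_2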
def _check_inputs(R, V, timesteps, ar_order):
if R.ndim != 3:
raise ValueError("R must be a three-dimensional array")
if R.shape[0] < ar_order + 1:
raise ValueError("R.shape[0] < ar_order+1")
if V.ndim != 3:
raise ValueError("V must be a three-dimensional array")
if R.shape[1:3] != V.shape[1:3]:
raise ValueError(
"dimension mismatch between R and V: shape(R)=%s, shape(V)=%s"
% (str(R.shape), str(V.shape))
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
def _compute_sprog_mask(R, war):
# obtain the CDF from the non-perturbed forecast that is
# scale-filtered by the AR(p) model
R_s = R.flatten()
# compute the threshold value R_pct_thr corresponding to the
# same fraction of precipitation pixels (forecast values above
# R_thr) as in the most recently observed precipitation field
R_s.sort(kind="quicksort")
x = 1.0 * np.arange(1, len(R_s) + 1)[::-1] / len(R_s)
i = np.argmin(abs(x - war))
# handle ties
if R_s[i] == R_s[i + 1]:
i = np.where(R_s == R_s[i])[0][-1]
R_pct_thr = R_s[i]
# determine a mask using the above threshold value to preserve the
# wet-area ratio
return R >= R_pct_thr
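# Illustrative sketch (not part of the original library): a percentile-based
# approximation of the threshold selected by _compute_sprog_mask above, i.e.
# the value that keeps the forecast wet-area ratio equal to the observed one.
# The exact routine above additionally handles ties; the helper name is an
# assumption for illustration.
def _reference_war_threshold(R, war):
    """Approximate precipitation threshold preserving the wet-area ratio."""
    return np.percentile(R.flatten(), 100.0 * (1.0 - war))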
|
pySTEPS/pysteps
|
pysteps/nowcasts/sprog.py
|
Python
|
bsd-3-clause
| 15,968
|
[
"Gaussian"
] |
7d0c9c53f0ee99433d83be79a456b6bfc972ee855c32afed4d04b04e0bd4789b
|
"""
API operations on the contents of a history.
"""
from galaxy import exceptions
from galaxy import util
from galaxy.web import _future_expose_api as expose_api
from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
from galaxy.web import url_for
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.base.controller import UsesHistoryDatasetAssociationMixin
from galaxy.web.base.controller import UsesHistoryMixin
from galaxy.web.base.controller import UsesLibraryMixin
from galaxy.web.base.controller import UsesLibraryMixinItems
from galaxy.web.base.controller import UsesTagsMixin
from galaxy.managers import histories
from galaxy.managers import hdas
from galaxy.managers.collections_util import api_payload_to_create_params, dictify_dataset_collection_instance
import logging
log = logging.getLogger( __name__ )
class HistoryContentsController( BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesHistoryMixin,
UsesLibraryMixin, UsesLibraryMixinItems, UsesTagsMixin ):
def __init__( self, app ):
super( HistoryContentsController, self ).__init__( app )
self.mgrs = util.bunch.Bunch(
histories=histories.HistoryManager(),
hdas=hdas.HDAManager()
)
def _decode_id( self, trans, id ):
try:
return trans.security.decode_id( id )
except:
raise exceptions.MalformedId( "Malformed History id ( %s ) specified, unable to decode"
% ( str( id ) ), type='error' )
@expose_api_anonymous
def index( self, trans, history_id, ids=None, **kwd ):
"""
index( self, trans, history_id, ids=None, **kwd )
* GET /api/histories/{history_id}/contents
return a list of HDA data for the history with the given ``id``
.. note:: Anonymous users are allowed to get their current history contents
If ids is not given, index returns a list of *summary* objects for
every HDA associated with the given `history_id`.
If ids is given, index returns a *more complete* json object for each
HDA in the ids list.
:type history_id: str
:param history_id: encoded id string of the HDA's History
:type ids: str
:param ids: (optional) a comma separated list of encoded `HDA` ids
:param types: (optional) kinds of contents to index (currently just
dataset, but dataset_collection will be added shortly).
:type types: str
:rtype: list
:returns: dictionaries containing summary or detailed HDA information
.. seealso::
:func:`_summary_hda_dict` and
:func:`galaxy.web.base.controller.UsesHistoryDatasetAssociationMixin.get_hda_dict`
"""
rval = []
# get the history, if anon user and requesting current history - allow it
if( ( trans.user == None )
and ( history_id == trans.security.encode_id( trans.history.id ) ) ):
history = trans.history
# otherwise, check permissions for the history first
else:
history = self.mgrs.histories.get( trans, self._decode_id( trans, history_id ),
check_ownership=False, check_accessible=True )
# Allow passing in type or types - for continuity rest of methods
# take in type - but this one can be passed multiple types and
# type=dataset,dataset_collection is a bit silly.
types = kwd.get( 'type', kwd.get( 'types', None ) ) or []
if types:
types = util.listify(types)
else:
types = [ 'dataset', "dataset_collection" ]
contents_kwds = { 'types': types }
if ids:
ids = map( lambda id: trans.security.decode_id( id ), ids.split( ',' ) )
contents_kwds[ 'ids' ] = ids
# If explicit ids given, always used detailed result.
details = 'all'
else:
contents_kwds[ 'deleted' ] = kwd.get( 'deleted', None )
contents_kwds[ 'visible' ] = kwd.get( 'visible', None )
# details param allows a mixed set of summary and detailed hdas
# Ever more convoluted due to backwards compat..., details
# should be considered deprecated in favor of more specific
# dataset_details (and to be implemented dataset_collection_details).
details = kwd.get( 'details', None ) or kwd.get( 'dataset_details', None ) or []
if details and details != 'all':
details = util.listify( details )
for content in history.contents_iter( **contents_kwds ):
if isinstance( content, trans.app.model.HistoryDatasetAssociation ):
encoded_content_id = trans.security.encode_id( content.id )
detailed = details == 'all' or ( encoded_content_id in details )
if detailed:
rval.append( self._detailed_hda_dict( trans, content ) )
else:
rval.append( self._summary_hda_dict( trans, history_id, content ) )
elif isinstance( content, trans.app.model.HistoryDatasetCollectionAssociation ):
rval.append( self.__collection_dict( trans, content ) )
return rval
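# Illustrative usage sketch (not part of the original controller): the
# endpoint documented above can be exercised over HTTP roughly as
#
#   GET /api/histories/<encoded_history_id>/contents?details=all&key=<api_key>
#
# where <encoded_history_id> and <api_key> are placeholders; the response is
# the JSON list of summary/detailed dictionaries assembled in index().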
#TODO: move to model or Mixin
def _summary_hda_dict( self, trans, encoded_history_id, hda ):
"""
Returns a dictionary based on the HDA in summary form::
{
'id' : < the encoded dataset id >,
'name' : < currently only returns 'file' >,
'type' : < name of the dataset >,
'url' : < api url to retrieve this datasets full data >,
}
"""
api_type = "file"
encoded_id = trans.security.encode_id( hda.id )
# TODO: handle failed_metadata here as well
return {
'id' : encoded_id,
'history_id' : encoded_history_id,
'name' : hda.name,
'type' : api_type,
'state' : hda.dataset.state,
'deleted': hda.deleted,
'visible': hda.visible,
'purged': hda.purged,
'resubmitted': hda._state == trans.app.model.Dataset.states.RESUBMITTED,
'hid' : hda.hid,
'history_content_type' : hda.history_content_type,
'url' : url_for( 'history_content_typed', history_id=encoded_history_id, id=encoded_id, type="dataset" ),
}
def __collection_dict( self, trans, dataset_collection_instance, view="collection" ):
return dictify_dataset_collection_instance( dataset_collection_instance,
security=trans.security, parent=dataset_collection_instance.history, view=view )
def _detailed_hda_dict( self, trans, hda ):
"""
Detailed dictionary of hda values.
"""
try:
hda_dict = self.get_hda_dict( trans, hda )
hda_dict[ 'display_types' ] = self.get_old_display_applications( trans, hda )
hda_dict[ 'display_apps' ] = self.get_display_apps( trans, hda )
return hda_dict
except Exception, exc:
# catch error here - returning a briefer hda_dict with an error attribute
log.exception( "Error in history API at listing contents with history %s, hda %s: (%s) %s",
hda.history_id, hda.id, type( exc ), str( exc ) )
return self.get_hda_dict_with_error( trans, hda=hda, error_msg=str( exc ) )
@expose_api_anonymous
def show( self, trans, id, history_id, **kwd ):
"""
show( self, trans, id, history_id, **kwd )
* GET /api/histories/{history_id}/contents/{id}
return detailed information about an HDA within a history
.. note:: Anonymous users are allowed to get their current history contents
:type id: str
:param id: the encoded id of the HDA to return
:type history_id: str
:param history_id: encoded id string of the HDA's History
:rtype: dict
:returns: dictionary containing detailed HDA information
.. seealso:: :func:`galaxy.web.base.controller.UsesHistoryDatasetAssociationMixin.get_hda_dict`
"""
contents_type = kwd.get('type', 'dataset')
if contents_type == 'dataset':
return self.__show_dataset( trans, id, **kwd )
elif contents_type == 'dataset_collection':
return self.__show_dataset_collection( trans, id, history_id, **kwd )
else:
return self.__handle_unknown_contents_type( trans, contents_type )
def __show_dataset_collection( self, trans, id, history_id, **kwd ):
try:
service = trans.app.dataset_collections_service
dataset_collection_instance = service.get_dataset_collection_instance(
trans=trans,
instance_type='history',
id=id,
)
return self.__collection_dict( trans, dataset_collection_instance, view="element" )
except Exception, e:
log.exception( "Error in history API at listing dataset collection: %s", e )
trans.response.status = 500
return msg
def __show_dataset( self, trans, id, **kwd ):
hda = self.mgrs.hdas.get( trans, self._decode_id( trans, id ), check_ownership=False, check_accessible=True )
#if hda.history.id != self._decode_id( trans, history_id ):
# raise exceptions.ObjectNotFound( 'dataset was not found in this history' )
hda_dict = self.get_hda_dict( trans, hda )
hda_dict[ 'display_types' ] = self.get_old_display_applications( trans, hda )
hda_dict[ 'display_apps' ] = self.get_display_apps( trans, hda )
return hda_dict
@expose_api_anonymous
def create( self, trans, history_id, payload, **kwd ):
"""
create( self, trans, history_id, payload, **kwd )
* POST /api/histories/{history_id}/contents/{type}
create a new HDA by copying an accessible LibraryDataset
:type history_id: str
:param history_id: encoded id string of the new HDA's History
:type type: str
:param type: Type of history content - 'dataset' (default) or
'dataset_collection'.
:type payload: dict
:param payload: dictionary structure containing::
copy from library (for type 'dataset'):
'source' = 'library'
'content' = [the encoded id from the library dataset]
copy from history dataset (for type 'dataset'):
'source' = 'hda'
'content' = [the encoded id from the HDA]
copy from history dataset collection (for type 'dataset_collection')
'source' = 'hdca'
'content' = [the encoded id from the HDCA]
create new history dataset collection (for type 'dataset_collection')
'source' = 'new_collection' (default 'source' if type is
'dataset_collection' - no need to specify this)
'collection_type' = For example, "list", "paired", "list:paired".
'name' = Name of new dataset collection.
'element_identifiers' = Recursive list structure defining collection.
Each element must have 'src' which can be
'hda', 'ldda', 'hdca', or 'new_collection',
as well as a 'name' which is the name of
element (e.g. "forward" or "reverse" for
paired datasets, or arbitrary sample names
for instance for lists). For all src's except
                                    'new_collection' - an encoded 'id' attribute
                                    must be included with the element as well.
                                    'new_collection' sources must define a
'collection_type' and their own list of
(potentially) nested 'element_identifiers'.
        .. note::
Currently, a user can only copy an HDA from a history that the user owns.
:rtype: dict
:returns: dictionary containing detailed information for the new HDA
"""
# TODO: Flush out create new collection documentation above, need some
# examples. See also bioblend and API tests for specific examples.
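        # Illustrative payload sketches (not from the original source; ids are
        # placeholders and field names follow the docstring above):
        #   copy a library dataset:  {"source": "library", "content": "<encoded_library_dataset_id>"}
        #   copy an existing HDA:    {"source": "hda", "content": "<encoded_hda_id>"}
        #   new paired collection:   {"type": "dataset_collection", "collection_type": "paired",
        #                             "name": "my pair", "element_identifiers": [
        #                                 {"src": "hda", "id": "<id>", "name": "forward"},
        #                                 {"src": "hda", "id": "<id>", "name": "reverse"}]}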
# get the history, if anon user and requesting current history - allow it
if( ( trans.user == None )
and ( history_id == trans.security.encode_id( trans.history.id ) ) ):
history = trans.history
# otherwise, check permissions for the history first
else:
history = self.mgrs.histories.get( trans, self._decode_id( trans, history_id ),
check_ownership=True, check_accessible=True )
type = payload.get('type', 'dataset')
if type == 'dataset':
return self.__create_dataset( trans, history, payload, **kwd )
elif type == 'dataset_collection':
return self.__create_dataset_collection( trans, history, payload, **kwd )
else:
return self.__handle_unknown_contents_type( trans, type )
def __create_dataset( self, trans, history, payload, **kwd ):
source = payload.get( 'source', None )
if source not in ( 'library', 'hda' ):
raise exceptions.RequestParameterInvalidException(
"'source' must be either 'library' or 'hda': %s" %( source ) )
content = payload.get( 'content', None )
if content is None:
raise exceptions.RequestParameterMissingException( "'content' id of lda or hda is missing" )
# copy from library dataset
hda = None
if source == 'library':
ld = self.get_library_dataset( trans, content, check_ownership=False, check_accessible=False )
#TODO: why would get_library_dataset NOT return a library dataset?
if type( ld ) is not trans.app.model.LibraryDataset:
raise exceptions.RequestParameterInvalidException(
"Library content id ( %s ) is not a dataset" % content )
# insert into history
hda = ld.library_dataset_dataset_association.to_history_dataset_association( history, add_to_history=True )
# copy an existing, accessible hda
elif source == 'hda':
unencoded_hda_id = self._decode_id( trans, content )
original = self.mgrs.hdas.get( trans, unencoded_hda_id, check_ownership=False, check_accessible=True )
data_copy = original.copy( copy_children=True )
hda = history.add_dataset( data_copy )
trans.sa_session.flush()
if not hda:
return None
#TODO: duplicate code - use a serializer with a view
hda_dict = self.get_hda_dict( trans, hda )
hda_dict[ 'display_types' ] = self.get_old_display_applications( trans, hda )
hda_dict[ 'display_apps' ] = self.get_display_apps( trans, hda )
return hda_dict
def __create_dataset_collection( self, trans, history, payload, **kwd ):
source = kwd.get("source", "new_collection")
service = trans.app.dataset_collections_service
if source == "new_collection":
create_params = api_payload_to_create_params( payload )
dataset_collection_instance = service.create(
trans,
parent=history,
**create_params
)
elif source == "hdca":
content = payload.get( 'content', None )
if content is None:
raise exceptions.RequestParameterMissingException( "'content' id of target to copy is missing" )
dataset_collection_instance = service.copy(
trans=trans,
parent=history,
source="hdca",
encoded_source_id=content,
)
else:
message = "Invalid 'source' parameter in request %s" % source
raise exceptions.RequestParameterInvalidException(message)
return self.__collection_dict( trans, dataset_collection_instance, view="element" )
@expose_api_anonymous
def update( self, trans, history_id, id, payload, **kwd ):
"""
update( self, trans, history_id, id, payload, **kwd )
* PUT /api/histories/{history_id}/contents/{id}
updates the values for the HDA with the given ``id``
:type history_id: str
:param history_id: encoded id string of the HDA's History
:type id: str
        :param id: the encoded id of the HDA to update
:type payload: dict
:param payload: a dictionary containing any or all the
fields in :func:`galaxy.model.HistoryDatasetAssociation.to_dict`
and/or the following:
* annotation: an annotation for the HDA
:rtype: dict
:returns: an error object if an error occurred or a dictionary containing
any values that were different from the original and, therefore, updated
"""
#TODO: PUT /api/histories/{encoded_history_id} payload = { rating: rating } (w/ no security checks)
contents_type = kwd.get('type', 'dataset')
if contents_type == "dataset":
return self.__update_dataset( trans, history_id, id, payload, **kwd )
elif contents_type == "dataset_collection":
return self.__update_dataset_collection( trans, history_id, id, payload, **kwd )
else:
return self.__handle_unknown_contents_type( trans, contents_type )
def __update_dataset( self, trans, history_id, id, payload, **kwd ):
changed = {}
# anon user
if trans.user == None:
if history_id != trans.security.encode_id( trans.history.id ):
raise exceptions.AuthenticationRequired( 'You must be logged in to update this history' )
anon_allowed_payload = {}
if 'deleted' in payload:
anon_allowed_payload[ 'deleted' ] = payload[ 'deleted' ]
if 'visible' in payload:
anon_allowed_payload[ 'visible' ] = payload[ 'visible' ]
payload = self._validate_and_parse_update_payload( anon_allowed_payload )
hda = self.mgrs.hdas.get( trans, self._decode_id( trans, id ),
check_ownership=False, check_accessible=False )
hda = self.mgrs.hdas.err_if_uploading( trans, hda )
if hda.history != trans.history:
raise exceptions.AuthenticationRequired( 'You must be logged in to update this dataset' )
else:
payload = self._validate_and_parse_update_payload( payload )
# only check_state if not deleting, otherwise cannot delete uploading files
check_state = not payload.get( 'deleted', False )
hda = self.mgrs.hdas.get( trans, self._decode_id( trans, id ),
check_ownership=True, check_accessible=True )
if check_state:
hda = self.mgrs.hdas.err_if_uploading( trans, hda )
#hda = self.get_dataset( trans, id, check_ownership=True, check_accessible=True, check_state=check_state )
if hda and isinstance( hda, trans.model.HistoryDatasetAssociation ):
changed = self.set_hda_from_dict( trans, hda, payload )
if payload.get( 'deleted', False ):
self.stop_hda_creating_job( hda )
return changed
def __update_dataset_collection( self, trans, history_id, id, payload, **kwd ):
return trans.app.dataset_collections_service.update( trans, "history", id, payload )
#TODO: allow anonymous del/purge and test security on this
@expose_api
def delete( self, trans, history_id, id, purge=False, **kwd ):
"""
delete( self, trans, history_id, id, **kwd )
* DELETE /api/histories/{history_id}/contents/{id}
delete the HDA with the given ``id``
.. note:: Currently does not stop any active jobs for which this dataset is an output.
:type id: str
        :param id: the encoded id of the HDA to delete
:type purge: bool
:param purge: if True, purge the HDA
:type kwd: dict
:param kwd: (optional) dictionary structure containing:
* payload: a dictionary itself containing:
* purge: if True, purge the HDA
.. note:: that payload optionally can be placed in the query string of the request.
This allows clients that strip the request body to still purge the dataset.
:rtype: dict
:returns: an error object if an error occurred or a dictionary containing:
            * id: the encoded id of the dataset,
            * deleted: if the dataset was marked as deleted,
            * purged: if the dataset was purged
"""
contents_type = kwd.get('type', 'dataset')
if contents_type == "dataset":
return self.__delete_dataset( trans, history_id, id, purge=purge, **kwd )
elif contents_type == "dataset_collection":
trans.app.dataset_collections_service.delete( trans, "history", id )
return { 'id' : id, "deleted": True }
else:
return self.__handle_unknown_contents_type( trans, contents_type )
def __delete_dataset( self, trans, history_id, id, purge, **kwd ):
# get purge from the query or from the request body payload (a request body is optional here)
purge = util.string_as_bool( purge )
if kwd.get( 'payload', None ):
# payload takes priority
purge = util.string_as_bool( kwd['payload'].get( 'purge', purge ) )
hda = self.mgrs.hdas.get( trans, self._decode_id( trans, id ),
check_ownership=True, check_accessible=True )
self.mgrs.hdas.err_if_uploading( trans, hda )
rval = { 'id' : id }
hda.deleted = True
if purge:
if not trans.app.config.allow_user_dataset_purge:
raise exceptions.ConfigDoesNotAllowException( 'This instance does not allow user dataset purging' )
hda.purged = True
trans.sa_session.add( hda )
trans.sa_session.flush()
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.sa_session.add( hda.dataset )
except:
pass
# flush now to preserve deleted state in case of later interruption
trans.sa_session.flush()
rval[ 'purged' ] = True
self.stop_hda_creating_job( hda )
trans.sa_session.flush()
rval[ 'deleted' ] = True
return rval
def _validate_and_parse_update_payload( self, payload ):
"""
        Validate and parse incoming data payload for an HDA.
"""
# This layer handles (most of the stricter idiot proofing):
# - unknown/unallowed keys
# - changing data keys from api key to attribute name
# - protection against bad data form/type
# - protection against malicious data content
# all other conversions and processing (such as permissions, etc.) should happen down the line
# keys listed here don't error when attempting to set, but fail silently
# this allows PUT'ing an entire model back to the server without attribute errors on uneditable attrs
valid_but_uneditable_keys = (
'id', 'name', 'type', 'api_type', 'model_class', 'history_id', 'hid',
'accessible', 'purged', 'state', 'data_type', 'file_ext', 'file_size', 'misc_blurb',
'download_url', 'visualizations', 'display_apps', 'display_types',
'metadata_dbkey', 'metadata_column_names', 'metadata_column_types', 'metadata_columns',
'metadata_comment_lines', 'metadata_data_lines'
)
validated_payload = {}
for key, val in payload.items():
if val is None:
continue
if key in ( 'name', 'genome_build', 'misc_info', 'annotation' ):
val = self.validate_and_sanitize_basestring( key, val )
#TODO: need better remap system or eliminate the need altogether
key = 'dbkey' if key == 'genome_build' else key
key = 'info' if key == 'misc_info' else key
validated_payload[ key ] = val
if key in ( 'deleted', 'visible' ):
validated_payload[ key ] = self.validate_boolean( key, val )
elif key == 'tags':
validated_payload[ key ] = self.validate_and_sanitize_basestring_list( key, val )
elif key not in valid_but_uneditable_keys:
pass
#log.warn( 'unknown key: %s', str( key ) )
return validated_payload
def __handle_unknown_contents_type( self, trans, contents_type ):
        raise exceptions.UnknownContentsType('Unknown contents type: %s' % contents_type)
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/history_contents.py
|
Python
|
gpl-3.0
| 25,537
|
[
"Galaxy"
] |
e2bce10d21366df73c85b8f7720a117ec948ed23790074311c2f838a3652ab54
|
"""Tools for managing evaluation contexts. """
from __future__ import print_function, division
from sympy.utilities.iterables import dict_merge
from sympy.polys.polyutils import PicklableWithSlots
__known_options__ = set(['frac', 'gens', 'wrt', 'sort', 'order', 'domain',
'modulus', 'gaussian', 'extension', 'field', 'greedy', 'symmetric'])
__global_options__ = []
__template__ = """\
def %(option)s(_%(option)s):
return Context(%(option)s=_%(option)s)
"""
for option in __known_options__:
exec(__template__ % { 'option': option })
class Context(PicklableWithSlots):
__slots__ = ['__options__']
def __init__(self, dict=None, **options):
if dict is not None:
self.__options__ = dict_merge(dict, options)
else:
self.__options__ = options
def __getattribute__(self, name):
if name in __known_options__:
try:
return object.__getattribute__(self, '__options__')[name]
except KeyError:
return None
else:
return object.__getattribute__(self, name)
def __str__(self):
return 'Context(%s)' % ', '.join(
[ '%s=%r' % (key, value) for key, value in self.__options__.iteritems() ])
def __and__(self, other):
if isinstance(other, Context):
return Context(**dict_merge(self.__options__, other.__options__))
else:
raise TypeError("a context manager expected, got %s" % other)
def __enter__(self):
raise NotImplementedError('global context')
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError('global context')
def register_context(func):
def wrapper(self, *args, **kwargs):
return func(*args, **dict_merge(self.__options__, kwargs))
wrapper.__doc__ = func.__doc__
wrapper.__name__ = func.__name__
setattr(Context, func.__name__, wrapper)
return func
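# A minimal usage sketch (illustrative only, not part of the original module): the
# exec() loop above generates one helper per option in __known_options__, each
# returning a Context, and contexts merge with the & operator:
#
#   >>> ctx = frac(True) & field(True)
#   >>> ctx.frac, ctx.field, ctx.domain
#   (True, True, None)    # options never set resolve to None via __getattribute__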
|
hrashk/sympy
|
sympy/polys/polycontext.py
|
Python
|
bsd-3-clause
| 1,944
|
[
"Gaussian"
] |
9f72372a4d377a838a5a149946714045a78bc32b7ed87615be5e6978a2da6f42
|
#
# @file TestAlgebraicRule.py
# @brief AlgebraicRule unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestAlgebraicRule.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestAlgebraicRule(unittest.TestCase):
global AR
AR = None
def setUp(self):
self.AR = libsbml.AlgebraicRule(2,4)
if (self.AR == None):
pass
pass
def tearDown(self):
_dummyList = [ self.AR ]; _dummyList[:] = []; del _dummyList
pass
def test_AlgebraicRule_create(self):
self.assert_( self.AR.getTypeCode() == libsbml.SBML_ALGEBRAIC_RULE )
self.assert_( self.AR.getMetaId() == "" )
self.assert_( self.AR.getNotes() == None )
self.assert_( self.AR.getAnnotation() == None )
self.assert_( self.AR.getFormula() == "" )
self.assert_( self.AR.getMath() == None )
pass
def test_AlgebraicRule_createWithFormula(self):
ar = libsbml.AlgebraicRule(2,4)
ar.setFormula( "1 + 1")
self.assert_( ar.getTypeCode() == libsbml.SBML_ALGEBRAIC_RULE )
self.assert_( ar.getMetaId() == "" )
math = ar.getMath()
self.assert_( math != None )
formula = libsbml.formulaToString(math)
self.assert_( formula != None )
self.assert_(( "1 + 1" == formula ))
self.assert_(( formula == ar.getFormula() ))
_dummyList = [ ar ]; _dummyList[:] = []; del _dummyList
pass
def test_AlgebraicRule_createWithMath(self):
math = libsbml.parseFormula("1 + 1")
ar = libsbml.AlgebraicRule(2,4)
ar.setMath(math)
self.assert_( ar.getTypeCode() == libsbml.SBML_ALGEBRAIC_RULE )
self.assert_( ar.getMetaId() == "" )
self.assert_(( "1 + 1" == ar.getFormula() ))
self.assert_( ar.getMath() != math )
_dummyList = [ ar ]; _dummyList[:] = []; del _dummyList
pass
def test_AlgebraicRule_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(2,3)
sbmlns.addNamespaces(xmlns)
r = libsbml.AlgebraicRule(sbmlns)
self.assert_( r.getTypeCode() == libsbml.SBML_ALGEBRAIC_RULE )
self.assert_( r.getMetaId() == "" )
self.assert_( r.getNotes() == None )
self.assert_( r.getAnnotation() == None )
self.assert_( r.getLevel() == 2 )
self.assert_( r.getVersion() == 3 )
self.assert_( r.getNamespaces() != None )
self.assert_( r.getNamespaces().getLength() == 2 )
_dummyList = [ r ]; _dummyList[:] = []; del _dummyList
pass
def test_AlgebraicRule_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestAlgebraicRule))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestAlgebraicRule.py
|
Python
|
bsd-3-clause
| 4,067
|
[
"VisIt"
] |
d53a9c0eb3d887255839596a8e89f4c8d2b68193f66c34adaddb714b3c8bbc94
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import logging
import string
import re
import url as urlpy
import ipaddress
from textcode import analysis
from cluecode import finder_data
LOG = logging.getLogger(__name__)
DEBUG = False
"""
Find patterns in text lines such as emails and URLs.
Optionally apply filters to pattern matches.
"""
def find(location, patterns):
"""
Yield match and matched lines for patterns found in file at location as a
tuple of (key, found text, text line). Pattern is list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
if DEBUG:
from pprint import pformat
loc = pformat(location)
print('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals())
for i, line in enumerate(analysis.text_lines(location)):
lineno = i + 1
for key, pattern in patterns:
for match in pattern.findall(line):
if DEBUG:
print('find: yielding match: key=%(key)r, '
'match=%(match)r,\n line=%(line)r' % locals())
yield key, unicode(match), line, lineno
def find_and_filter(location, patterns, filters, unique=True):
"""
Yield match and matched line number for patterns found in file at location
as a tuple of (found text, line number). Pattern is list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
def unique_filter(matches):
"""
Iterate over matches and yield unique matches.
"""
uniques = set()
for key, match, line, lineno in matches:
if (key, match,) in uniques:
continue
uniques.add((key, match,))
yield key, match, line, lineno
def apply_filters(matches, *filters):
"""
Apply a sequence of `filters` to a `matches` iterable. Return a new filtered
matches iterable.
A filter must accept a single arg: an iterable of tuples of (key, match,
line, lineno) and must return an iterable of tuples of (key, match, line,
lineno).
"""
for filt in filters:
matches = filt(matches)
return matches
def build_regex_filter(pattern):
"""
Return a filter function using regex pattern, filtering out matches
matching this regex. The pattern should be text, not a compiled re.
"""
def re_filt(matches):
for key, match, line, lineno in matches:
if re.match(regex, match):
if DEBUG:
print('build_regex_filter(pattern=%(pattern)r: '
'filtering match: %(match)r' % locals())
continue
yield key, match, line, lineno
regex = re.compile(pattern, re.UNICODE | re.I)
return re_filt
# A good reference page of email address regex is:
# http://fightingforalostcause.net/misc/2006/compare-email-regex.php email
# regex from http://www.regular-expressions.info/regexbuddy/email.html
def emails_regex():
return re.compile(r'\b[A-Z0-9._%-]+@[A-Z0-9.-]+\.[A-Z]{2,4}\b', re.IGNORECASE)
def find_emails(location, unique=True):
"""
Yield emails found in file at location.
Only return unique items if unique is True.
"""
patterns = [('emails', emails_regex(),)]
matches = find(location, patterns)
filters = (junk_email_domains_filter,)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, email, _line, lineno in matches:
yield email, lineno
def junk_email_domains_filter(matches):
"""
Given an iterable of email matches, return an iterable where email with
common uninteresting domains have been removed, such as local, non public
or example.com emails.
"""
for key, email, line, lineno in matches:
domain = email.split('@')[-1]
if not is_good_host(domain):
continue
yield key, email, line, lineno
def uninteresting_emails_filter(matches):
"""
Given an iterable of emails matches, return an iterable where common
uninteresting emails have been removed.
"""
for key, email, line, lineno in matches:
good_email = finder_data.classify_email(email)
if not good_email:
continue
yield key, email, line, lineno
# TODO: consider: http://www.regexguru.com/2008/11/detecting-urls-in-a-block-of-text/
# TODO: consider: http://blog.codinghorror.com/the-problem-with-urls/
schemes = 'https?|ftps?|sftp|rsync|ssh|svn|git|hg|https?\+git|https?\+svn|https?\+hg'
url_body = '[^\s<>\[\]"]'
def urls_regex():
# no space, no < >, no [ ] and no double quote
return re.compile(r'''
(
# URLs with schemes
(?:%(schemes)s)://%(url_body)s+
|
# common URLs prefix without schemes
(?:www|ftp)\.%(url_body)s+
|
            # git style git@github.com:christophercantu/pipeline.git
git\@%(url_body)s+:%(url_body)s+\.git
)''' % globals()
, re.UNICODE | re.VERBOSE | re.IGNORECASE)
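# Illustrative strings the pattern above is meant to match (not from the original source):
#   "https://example.com/a/b?x=1"    - scheme-full URL
#   "www.example.org/downloads"      - bare "www." prefix; a scheme is added later by scheme_adder()
#   "git@github.com:user/repo.git"   - git SSH style; excluded from several filters via is_filterable()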
INVALID_URLS_PATTERN = '((?:' + schemes + ')://([$%*/_])+)'
def find_urls(location, unique=True):
"""
Yield urls found in file at location.
Only return unique items if unique is True.
"""
patterns = [('urls', urls_regex(),)]
matches = find(location, patterns)
# the order of filters IS important
filters = (
verbatim_crlf_url_cleaner,
end_of_url_cleaner,
empty_urls_filter,
scheme_adder,
user_pass_cleaning_filter,
build_regex_filter(INVALID_URLS_PATTERN),
canonical_url_cleaner,
junk_url_hosts_filter,
junk_urls_filter,
)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, url, _line, lineno in matches:
yield unicode(url), lineno
EMPTY_URLS = set(['https', 'http', 'ftp', 'www', ])
def empty_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable without empty URLs.
"""
for key, match, line, lineno in matches:
junk = match.lower().strip(string.punctuation).strip()
if not junk or junk in EMPTY_URLS:
if DEBUG:
print('empty_urls_filter: filtering match: %(match)r'
% locals())
continue
yield key, match, line, lineno
def verbatim_crlf_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where literal end of
lines and carriage return characters that may show up as-is, un-encoded in
a URL have been removed.
"""
# FIXME: when is this possible and could happen?
for key, url, line, lineno in matches:
if not url.endswith('/'):
url = url.replace(r'\n', '')
url = url.replace(r'\r', '')
yield key, url, line, lineno
def end_of_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where junk characters
commonly found at the end of a URL are removed.
This is not entirely correct, but works practically.
"""
for key, url, line, lineno in matches:
if not url.endswith('/'):
            url = url.replace(u'&lt;', u'<')
            url = url.replace(u'&gt;', u'>')
            url = url.replace(u'&amp;', u'&')
url = url.rstrip(string.punctuation)
url = url.split(u'\\')[0]
url = url.split(u'<')[0]
url = url.split(u'>')[0]
url = url.split(u'(')[0]
url = url.split(u')')[0]
url = url.split(u'[')[0]
url = url.split(u']')[0]
url = url.split(u'"')[0]
url = url.split(u"'")[0]
yield key, url, line, lineno
non_standard_urls_prefix = ('git@',)
def is_filterable(url):
"""
Return True if a url is eligible for filtering. Certain URLs should not pass
    through certain filters (such as a git@github.com style urls)
"""
return not url.startswith(non_standard_urls_prefix)
def scheme_adder(matches):
"""
Add a fake http:// scheme if there was none.
"""
for key, match, line, lineno in matches:
if is_filterable(match):
match = add_fake_scheme(match)
yield key, match, line, lineno
def add_fake_scheme(url):
"""
Add a fake http:// scheme to URL if has none.
"""
if not has_scheme(url):
url = u'http://' + url.lstrip(u':/').strip()
return url
def has_scheme(url):
"""
Return True if url has a scheme.
"""
return re.match('^(?:%(schemes)s)://.*' % globals(), url)
def user_pass_cleaning_filter(matches):
"""
Given an iterable of URL matches, return an iterable where user and
password are removed from the URLs host.
"""
for key, match, line, lineno in matches:
if is_filterable(match):
host, _domain = url_host_domain(match)
if not host:
if DEBUG:
print('user_pass_cleaning_filter: '
'filtering match(no host): %(match)r' % locals())
continue
if '@' in host:
# strips any user/pass
host = host.split(u'@')[-1]
yield key, match, line, lineno
def canonical_url(uri):
"""
Return the canonical representation of a given URI.
This assumes the `uri` has a scheme.
* When a default port corresponding for the scheme is explicitly declared
(such as port 80 for http), the port will be removed from the output.
* Fragments '#' are not removed.
* Params and query string arguments are not reordered.
"""
normalized = urlpy.parse(uri).sanitize().punycode()
if normalized._port == urlpy.PORTS.get(normalized._scheme, None):
normalized._port = None
return normalized.utf8()
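# Rough behaviour sketch (assumed from the urlpy calls above, not verified against a
# specific urlpy version):
#   canonical_url(u'http://example.com:80/path/') -> u'http://example.com/path/'
# i.e. an explicit default port is dropped while fragments and query order are preserved.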
def canonical_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where URLs have been
canonicalized.
"""
for key, match, line, lineno in matches:
if is_filterable(match):
match = canonical_url(match)
if DEBUG:
print('canonical_url_cleaner: '
'match=%(match)r, canonic=%(canonic)r' % locals())
yield key, match , line, lineno
IP_V4_RE = r'^(\d{1,3}\.){0,3}\d{1,3}$'
def is_ip_v4(s):
return re.compile(IP_V4_RE).match(s)
IP_V6_RE = (
r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$'
'|'
r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$'
)
def is_ip_v6(s):
"""
Return True is string s is an IP V6 address
"""
return re.compile(IP_V6_RE).match(s)
def is_ip(s):
"""
Return True is string s is an IP address
"""
return is_ip_v4(s) or is_ip_v6(s)
def get_ip(s):
"""
Return True is string s is an IP address
"""
if not is_ip(s):
return False
try:
ip = ipaddress.ip_address(unicode(s))
return ip
except ValueError:
return False
def is_private_ip(ip):
"""
Return true if ip object is a private or local IP.
"""
if ip:
if isinstance(ip, ipaddress.IPv4Address):
private = (
ip.is_reserved
or ip.is_private
or ip.is_multicast
or ip.is_unspecified
or ip.is_loopback
or ip.is_link_local
)
else:
            private = (
ip.is_multicast
or ip.is_reserved
or ip.is_link_local
or ip.is_site_local
or ip.is_private
or ip.is_unspecified
or ip.is_loopback
)
return private
def is_good_host(host):
"""
Return True if the host is not some local or uninteresting host.
"""
if not host:
return False
ip = get_ip(host)
if ip:
if is_private_ip(ip):
return False
return finder_data.classify_ip(host)
# at this stage we have a host name, not an IP
if '.' not in host:
# private hostnames not in a domain, including localhost
return False
good_host = finder_data.classify_host(host)
return good_host
def url_host_domain(url):
"""
Return a tuple of the (host, domain) of a URL or None. Assumes that the
URL has a scheme.
"""
parsed = urlpy.parse(url)
host = parsed._host
if not host:
return None, None
host = host.lower()
domain = parsed.pld().lower()
return host, domain
def junk_url_hosts_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting hosts or domains have been removed, such as local,
non public or example.com URLs.
"""
for key, match, line, lineno in matches:
if is_filterable(match):
host, domain = url_host_domain(match)
if not is_good_host(host):
if DEBUG:
print('junk_url_hosts_filter: '
'!is_good_host:%(host)r): %(match)r' % locals())
continue
if not is_good_host(domain) and not is_ip(host):
if DEBUG:
print('junk_url_hosts_filter: ''!is_good_host:%(domain)r '
'and !is_ip:%(host)r: %(match)r' % locals())
continue
yield key, match, line, lineno
def junk_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting URLs, or uninteresting URL hosts or domains have been
removed, such as local, non public or example.com URLs.
"""
for key, match, line, lineno in matches:
good_url = finder_data.classify_url(match)
if not good_url:
if DEBUG:
print('junk_url_filter: %(match)r' % locals())
continue
yield key, match, line, lineno
def find_pattern(location, pattern, unique=False):
"""
Find regex pattern in the text lines of file at location.
Return all match groups joined as one unicode string.
Only return unique items if unique is True.
"""
pattern = re.compile(pattern, re.UNICODE | re.I)
matches = find(location, [(None, pattern,)])
if unique:
matches = unique_filter(matches)
for _key, match , _line, lineno in matches:
yield match, lineno
|
yashdsaraf/scancode-toolkit
|
src/cluecode/finder.py
|
Python
|
apache-2.0
| 15,886
|
[
"VisIt"
] |
0d1218d0125fa7b9de35a46ef555e714eb4d99d1df3979ea788f4aa2e3da529d
|
# Copyright (C) 2010 by Kevin Saff
# This file is part of the CA scanner.
# The CA scanner is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# The CA scanner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the CA scanner. If not, see <http://www.gnu.org/licenses/>.
#Implementations of file formats should handle resources in terms of qdict's,
# as defined in qdict.qdict.
import numpy
from charts._chart import chart
def read(filename, file=None):
if filename.endswith('.zip'):
import native
return native.read(filename, file)
elif filename.endswith('.mcl'):
import mcell
return mcell.read(filename, file)
elif filename.endswith('.png'):
import png
return png.read(filename, file)
elif filename.endswith('.fits'):
import fits
return fits.read(filename, file)
elif filename.startswith('meta') and filename.endswith('txt'):
import meta
return meta.read(filename, file)
else:
raise ValueError, 'Do not understand how to read file "%s"' % filename
def write(filename, data, file=None, chart=(0,0)):
if filename.endswith('.zip'):
import native
return native.write(filename, data, file, chart)
elif filename.endswith('.png'):
import png
return png.write(filename, data, file, chart)
elif filename.endswith('.fits'):
import fits
return fits.write(filename, data, file, chart)
else:
raise ValueError, 'Do not understand how to write file "%s"' % filename
def unwrap(world, view, data):
if 'rule' in data:
world.rule = data['rule']
unwrap_atlases(data, world)
if 'topology' in data:
world.topology = data['topology']
if 'toys' in data:
world.toys = data['toys']
if 'palette' in data:
view.palette = data['palette']
if 'speed' in data:
view.speed = data['speed']
def wrap(world, view):
result = {'rule': world.rule,
'topology': world.topology,
'toys': world.toys,
'generation': world.generation,
'palette': view.palette,
'speed': view.speed,
'zoom': view.zoom,
'center': view.center,
}
wrap_atlases(world, result)
return result
def unwrap_chart(data):
if data.dtype == numpy.uint8:
return chart('bytescan',
data=data)
elif data.dtype == numpy.float:
return chart('floatscan',
data=data)
def unwrap_atlas(atlas):
return [unwrap_chart(chart) for chart in atlas]
def unwrap_atlases(data, world):
atlases = get_atlases(data)
if len(atlases) > 0:
world.charts = atlases[0]
if len(atlases) > 1:
world._scratch_charts = atlases[1]
def wrap_atlases(world, resource):
if getattr(world.rule, 'history', False):
atlases = ([chart.data for chart in world.charts],
[chart.data for chart in world._scratch_charts])
else:
atlases = ([chart.data for chart in world.charts],)
set_atlases(resource, atlases)
def set_atlases(resource, atlases):
for x, atlas in enumerate(atlases):
for y, chart in enumerate(atlas):
resource['chart(%d,%d)' % (x, y)] = chart
def get_atlases(resource):
atlases = []
for key in resource.keys():
if key.startswith('chart('):
nos = [int(x) for x in key.strip('chart()').split(',')]
while len(atlases) <= nos[0]:
atlases.append([])
while len(atlases[nos[0]]) <= nos[1]:
atlases[nos[0]].append(None)
atlases[nos[0]][nos[1]] = resource[key]
return atlases
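# Illustrative resource layout (assumed): one atlas with two charts is stored flat as
#   {'chart(0,0)': array00, 'chart(0,1)': array01}
# get_atlases() rebuilds the nested form [[array00, array01]] and set_atlases() is its inverse.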
def get_subscripts(filename):
try:
return tuple([int(x) for x in filename.split('.')[1].split('-')])
except ValueError:
return (0,0)
|
kcsaff/CA
|
src/formats/__init__.py
|
Python
|
gpl-3.0
| 4,524
|
[
"MCell"
] |
7df54d141d09c1564411b83bb8161b42fc632fa3f7b1137d92f65d6895b8f836
|
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import get_size, str_to_bool, to_list, get_color, get_alignment
from xhtml2pdf.util import get_box, get_position, PisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import sys
#support python 3
#import types
if sys.version[0] == '2':
StringTypes = (str,unicode)
else:
StringTypes = (str,)
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
from six import text_type
CSSAttrCache = {}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
except:
return self[name]
def pisaGetAttributes(c, tag, attributes):
global TAGS
attrs = {}
if attributes:
for k, v in attributes.items():
try:
attrs[str(k)] = str(v) # XXX no Unicode! Reportlab fails with template names
except:
attrs[k] = v
nattrs = {}
if tag in TAGS:
block, adef = TAGS[tag]
adef["id"] = STRING
# print block, adef
try:
iteritems = adef.iteritems()
except Exception:
iteritems = iter(adef.items())
for k, v in iteritems:
nattrs[k] = None
# print k, v
            # defaults, if present
if type(v) == tuple:
if v[1] == MUST:
if k not in attrs:
log.warn(c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == list:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = get_size(nv)
except:
log.warn(c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = get_box(nv, c.pageSize)
elif v == POS:
nv = get_position(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = get_color(nv)
elif v == FILE:
nv = c.get_file(nv)
elif v == FONT:
nv = c.get_font_name(nv)
nattrs[k] = nv
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
letter-spacing
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parse_inline(self.cssElement.getStyleAttr() or '')[0]
if attrName in style:
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
# Create an aliasing system. Many sources use non-standard tags, because browsers allow
# them to. This allows us to map a nonstandard name to the standard one.
nonStandardAttrNames = {
'bgcolor': 'background-color',
}
def mapNonStandardAttrs(c, n, attrList):
for attr in nonStandardAttrNames:
if attr in attrList and nonStandardAttrNames[attr] not in c:
c[nonStandardAttrNames[attr]] = attrList[attr]
return c
def getCSSAttrCacheKey(node):
_cl = _id = _st = ''
for k, v in node.attributes.items():
if k == 'class':
_cl = v
elif k == 'id':
_id = v
elif k == 'style':
_st = v
return "%s#%s#%s#%s#%s" % (id(node.parentNode), node.tagName.lower(), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
#return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
node.cssAttrs = CachedCSSAttr
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
#except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def lower(sequence):
    if isinstance(sequence, StringTypes):
return sequence.lower()
else:
return sequence[0].lower()
def CSS2Frag(c, kw, isBlock):
# COLORS
if "color" in c.cssAttr:
c.frag.textColor = get_color(c.cssAttr["color"])
if "background-color" in c.cssAttr:
c.frag.backColor = get_color(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if "font-family" in c.cssAttr:
c.frag.fontName = c.get_font_name(c.cssAttr["font-family"])
if "font-size" in c.cssAttr:
# XXX inherit
c.frag.fontSize = max(get_size("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if "line-height" in c.cssAttr:
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = get_size(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = get_size(c.frag.leadingSource, c.frag.fontSize)
if "letter-spacing" in c.cssAttr:
c.frag.letterSpacing = c.cssAttr["letter-spacing"]
if "-pdf-line-spacing" in c.cssAttr:
c.frag.leadingSpace = get_size("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if "font-weight" in c.cssAttr:
value = lower(c.cssAttr["font-weight"])
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in to_list(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if "font-style" in c.cssAttr:
value = lower(c.cssAttr["font-style"])
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if "white-space" in c.cssAttr:
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if "text-align" in c.cssAttr:
c.frag.alignment = get_alignment(c.cssAttr["text-align"])
if "vertical-align" in c.cssAttr:
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if "height" in c.cssAttr:
try:
c.frag.height = "".join(to_list(c.cssAttr["height"])) # XXX Relative is not correct!
except TypeError:
# sequence item 0: expected string, tuple found
c.frag.height = "".join(to_list(c.cssAttr["height"][0]))
if c.frag.height in ("auto",):
c.frag.height = None
if "width" in c.cssAttr:
try:
c.frag.width = "".join(to_list(c.cssAttr["width"])) # XXX Relative is not correct!
except TypeError:
c.frag.width = "".join(to_list(c.cssAttr["width"][0]))
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if "zoom" in c.cssAttr:
zoom = "".join(to_list(c.cssAttr["zoom"])) # XXX Relative is not correct!
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
if "margin-top" in c.cssAttr:
c.frag.spaceBefore = get_size(c.cssAttr["margin-top"], c.frag.fontSize)
if "margin-bottom" in c.cssAttr:
c.frag.spaceAfter = get_size(c.cssAttr["margin-bottom"], c.frag.fontSize)
if "margin-left" in c.cssAttr:
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += get_size(c.cssAttr["margin-left"], c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
if "margin-right" in c.cssAttr:
kw["margin-right"] += get_size(c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
if "text-indent" in c.cssAttr:
c.frag.firstLineIndent = get_size(c.cssAttr["text-indent"], c.frag.fontSize)
if "list-style-type" in c.cssAttr:
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if "list-style-image" in c.cssAttr:
c.frag.listStyleImage = c.get_file(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
if "padding-top" in c.cssAttr:
c.frag.paddingTop = get_size(c.cssAttr["padding-top"], c.frag.fontSize)
if "padding-bottom" in c.cssAttr:
c.frag.paddingBottom = get_size(c.cssAttr["padding-bottom"], c.frag.fontSize)
if "padding-left" in c.cssAttr:
c.frag.paddingLeft = get_size(c.cssAttr["padding-left"], c.frag.fontSize)
if "padding-right" in c.cssAttr:
c.frag.paddingRight = get_size(c.cssAttr["padding-right"], c.frag.fontSize)
# BORDERS
if isBlock:
if "border-top-width" in c.cssAttr:
c.frag.borderTopWidth = get_size(c.cssAttr["border-top-width"], c.frag.fontSize)
if "border-bottom-width" in c.cssAttr:
c.frag.borderBottomWidth = get_size(c.cssAttr["border-bottom-width"], c.frag.fontSize)
if "border-left-width" in c.cssAttr:
c.frag.borderLeftWidth = get_size(c.cssAttr["border-left-width"], c.frag.fontSize)
if "border-right-width" in c.cssAttr:
c.frag.borderRightWidth = get_size(c.cssAttr["border-right-width"], c.frag.fontSize)
if "border-top-style" in c.cssAttr:
c.frag.borderTopStyle = c.cssAttr["border-top-style"]
if "border-bottom-style" in c.cssAttr:
c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"]
if "border-left-style" in c.cssAttr:
c.frag.borderLeftStyle = c.cssAttr["border-left-style"]
if "border-right-style" in c.cssAttr:
c.frag.borderRightStyle = c.cssAttr["border-right-style"]
if "border-top-color" in c.cssAttr:
c.frag.borderTopColor = get_color(c.cssAttr["border-top-color"])
if "border-bottom-color" in c.cssAttr:
c.frag.borderBottomColor = get_color(c.cssAttr["border-bottom-color"])
if "border-left-color" in c.cssAttr:
c.frag.borderLeftColor = get_color(c.cssAttr["border-left-color"])
if "border-right-color" in c.cssAttr:
c.frag.borderRightColor = get_color(c.cssAttr["border-right-color"])
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
if attr.get("type", "").lower() in ("", "text/css") and \
(not media or "all" in media or "print" in media or "pdf" in media):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.add_css(data)
return u""
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.add_css('\n@import "%s" %s;' % (attr.href, ",".join(media)))
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=None, **kw):
if path is None:
path = []
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
#indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.add_fragment(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
#log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.cssAttr = mapNonStandardAttrs(context.cssAttr, node, attr)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = lower(context.cssAttr.get("display", "inline"))
# print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.add_paragraph()
# Page break by CSS
if "-pdf-next-page" in context.cssAttr:
context.add_story(NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if "-pdf-page-break" in context.cssAttr:
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.add_story(PageBreak())
if "-pdf-frame-break" in context.cssAttr:
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.add_story(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if "page-break-before" in context.cssAttr:
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.add_story(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.add_story(PageBreak())
context.add_story(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.add_story(PageBreak())
context.add_story(PmlLeftPageBreak())
if "page-break-after" in context.cssAttr:
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.push_fragment()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
if "-pdf-keep-with-next" in context.cssAttr:
context.frag.keepWithNext = str_to_bool(context.cssAttr["-pdf-keep-with-next"])
if "-pdf-outline" in context.cssAttr:
context.frag.outline = str_to_bool(context.cssAttr["-pdf-outline"])
if "-pdf-outline-level" in context.cssAttr:
context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"])
if "-pdf-outline-open" in context.cssAttr:
context.frag.outlineOpen = str_to_bool(context.cssAttr["-pdf-outline-open"])
if "-pdf-word-wrap" in context.cssAttr:
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if "-pdf-keep-in-frame-mode" in context.cssAttr:
value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
if "-pdf-keep-in-frame-max-width" in context.cssAttr:
keepInFrameMaxWidth = get_size("".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if "-pdf-keep-in-frame-max-height" in context.cssAttr:
keepInFrameMaxHeight = get_size("".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
            # keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swap_story()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.add_paragraph()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.add_story(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.add_story(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.add_story(PmlLeftPageBreak())
if frameBreakAfter:
context.add_story(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight))
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.add_paragraph()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swap_story(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pull_fragment()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
- Extract CSS informations, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
global CSSAttrCache
CSSAttrCache = {}
if xhtml:
        #TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
if isinstance(src, text_type):
# If an encoding was provided, do not change it.
if not encoding:
encoding = "utf-8"
src = src.encode(encoding)
src = PisaTempFile(src, capacity=context.capacity)
# Test for the restrictions of html5lib
if encoding:
# Workaround for html5lib<0.11.1
if hasattr(inputstream, "isValidEncoding"):
if encoding.strip().lower() == "utf8":
encoding = "utf-8"
if not inputstream.isValidEncoding(encoding):
log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
else:
if inputstream.codecName(encoding) is None:
log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src,
encoding=encoding)
if xml_output:
if encoding:
xml_output.write(document.toprettyxml(encoding=encoding))
else:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.add_default_css(default_css)
pisaPreLoop(document, context)
#try:
context.parse_css()
#except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
|
zulumarketing/html2pdf
|
xhtml2pdf/parser.py
|
Python
|
apache-2.0
| 25,758
|
[
"VisIt"
] |
7282f2b4cf8cfa49a689be5fd108b2b2d0b5b4f066a9132b72420d822b3416d4
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
IC-PCA2
Ice-crystal Classification with Principal Component Analysis
A complete rewrite in Python 3
Based on the original IC-PCA Matlab tool by Hannakaisa Lindqvist, Jussi Tiira
and Hanne Hakkarainen
@author: Jussi Tiira <[email protected]>
License: GPL v3
"""
import os, glob, configparser
from optparse import OptionParser
def main():
parser = OptionParser()
parser.add_option("-d", "--inputdir", dest="inputdir", default=os.getcwd(),
help="use PATH as input directory", metavar="PATH")
(options,args) = parser.parse_args()
config = configparser.ConfigParser()
config['DEFAULT'] = {'InputFilenames': '*.jpg'}
with open('classify.conf', 'w') as configfile:
config.write(configfile)
config.read('classify.conf')
files = glob.glob(os.path.join(options.inputdir,config['DEFAULT']['InputFilenames']))
print(files)
if __name__ == "__main__":
main()
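# --- Illustrative sketch (not part of IC-PCA2) ------------------------------
# main() writes a 'classify.conf' holding only the DEFAULT section and reads
# it back, so 'InputFilenames' resolves through configparser's DEFAULT
# fallback. The helper below demonstrates that behaviour in isolation and is
# never called; the section name 'run1' is hypothetical.
def _demo_config_fallback():
    cfg = configparser.ConfigParser()
    cfg['DEFAULT'] = {'InputFilenames': '*.jpg'}
    cfg['run1'] = {}                      # section with no explicit value
    return cfg['run1']['InputFilenames']  # -> '*.jpg' via the DEFAULT fallback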
|
juhi24/ic-pca2
|
classify.py
|
Python
|
gpl-3.0
| 1,014
|
[
"CRYSTAL"
] |
82bed5a9809938e1349e6611459e8382eb73c76a8949bc2692d567ca6d36b84c
|
#!/usr/bin/env python
# Dan Blankenberg
import json
import logging
import optparse
import os
import shutil
import tempfile
from xml.etree.ElementTree import tostring
try:
# For Python 3.0 and later
from shutil import unpack_archive
except ImportError:
# Fall back to Python 2 import
from setuptools.archive_util import unpack_archive
try:
# For Python 3.0 and later
from urllib.request import urlretrieve
from urllib.parse import urlsplit
except ImportError:
# Fall back to Python 2 imports
from urllib import urlretrieve
from urlparse import urlsplit
_log_name = __name__
if _log_name == '__builtin__':
_log_name = 'toolshed.installed.manual.data.manager'
log = logging.getLogger(_log_name)
# --- These methods are called by/within the Galaxy Application
def exec_before_job(app, inp_data, out_data, param_dict, tool=None, **kwd):
# Look for any data tables that haven't been defined for this data manager before and dynamically add them to Galaxy
param_dict = dict(**param_dict)
data_tables_param = param_dict.get('data_tables', [])
if not isinstance(data_tables_param, list):
data_tables_param = [data_tables_param]
if tool:
tool_shed_repository = tool.tool_shed_repository
else:
tool_shed_repository = None
tdtm = None
data_manager = app.data_managers.get_manager(tool.data_manager_id, None)
for data_table_param in data_tables_param:
data_table_name = data_table_param.get('data_table_name')
if data_table_name:
# the 'data_table_name' value in data_table_param is a SelectToolParameter,
# to get the selected value we need to cast data_table_name to string
data_table_name = str(data_table_name)
# get data table managed by this data Manager
data_table = app.tool_data_tables.get_tables().get(data_table_name)
if data_table:
data_table_filename = data_table.get_filename_for_source(data_manager, None)
if not data_table_filename:
if tdtm is None:
from tool_shed.tools import data_table_manager
tdtm = data_table_manager.ToolDataTableManager(app)
target_dir, tool_path, relative_target_dir = tdtm.get_target_install_dir(tool_shed_repository)
# Dynamically add this data table
log.debug("Attempting to dynamically create a missing Tool Data Table named %s." % data_table_name)
repo_info = tdtm.generate_repository_info_elem_from_repository(tool_shed_repository, parent_elem=None)
if repo_info is not None:
repo_info = tostring(repo_info)
tmp_file = tempfile.NamedTemporaryFile(mode="w")
tmp_file.write(__get_new_xml_definition(app, data_table, data_manager, repo_info, target_dir))
tmp_file.flush()
app.tool_data_tables.add_new_entries_from_config_file(tmp_file.name, None, app.config.shed_tool_data_table_config, persist=True)
tmp_file.close()
def __get_new_xml_definition(app, data_table, data_manager, repo_info=None, location_file_dir=None):
sub_dict = {'table_name': data_table.name, 'comment_char': '', 'columns': '', 'file_path': ''}
sub_dict.update(data_manager.get_tool_shed_repository_info_dict())
if data_table.comment_char:
sub_dict['comment_char'] = 'comment_char="%s"' % (data_table.comment_char)
for i, name in enumerate(data_table.get_column_name_list()):
if name is not None:
sub_dict['columns'] = "%s\n%s" % (sub_dict['columns'], '<column name="%s" index="%s" />' % (name, i))
location_file_dir = location_file_dir or app.config.galaxy_data_manager_data_path
for filename in data_table.filenames.keys():
sub_dict['file_path'] = os.path.basename(filename)
sub_dict['file_path'] = os.path.join(location_file_dir, sub_dict['file_path']) # os.path.abspath?
if not os.path.exists(sub_dict['file_path']):
# Create empty file
log.debug("Attempting to create a missing location file %s." % sub_dict['file_path'])
open(sub_dict['file_path'], 'wb+').close()
break
sub_dict['repo_info'] = repo_info or ''
return """
<tables><table name="%(table_name)s" %(comment_char)s>
%(columns)s
<file path="%(file_path)s" />
%(repo_info)s
</table></tables>
""" % sub_dict
def galaxy_code_get_available_data_tables(trans):
# list of data tables
return [(x, x, False) for x in trans.app.tool_data_tables.get_tables().keys()]
def galaxy_code_get_available_data_table_columns(trans, data_table_name):
return [(x, x, True) for x in trans.app.tool_data_tables.get(data_table_name).get_column_name_list()]
# --- End Galaxy called Methods ---
def get_data_table_entries(params, galaxy_data_manager_data_path):
rval = {}
data_tables = params.get('data_tables', [])
for data_table in data_tables:
entry_dict = {}
for column in data_table.get('columns', []):
value = column.get('data_table_column_value', '')
if column.get('is_path', {}).get('is_path_selector') == 'yes' and column.get('is_path', {}).get('abspath') == 'abspath':
value = os.path.abspath(os.path.join(galaxy_data_manager_data_path, value))
entry_dict[column.get('data_table_column_name', '')] = value
data_table_name = data_table['data_table_name']
rval[data_table_name] = rval.get(data_table_name, [])
rval[data_table_name].append(entry_dict)
return rval
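# --- Illustrative sketch (not part of the data manager) ---------------------
# Shows the shape of the 'param_dict' fragment that get_data_table_entries()
# expects and the table-name -> row-list mapping it returns. The table and
# column names below are hypothetical; only the nesting mirrors the loop above.
def _example_data_table_entries():
    sample_params = {
        'data_tables': [
            {
                'data_table_name': 'my_table',
                'columns': [
                    {'data_table_column_name': 'value',
                     'data_table_column_value': 'hg38',
                     'is_path': {'is_path_selector': 'no'}},
                ],
            },
        ],
    }
    # -> {'my_table': [{'value': 'hg38'}]}
    return get_data_table_entries(sample_params, galaxy_data_manager_data_path='/tmp')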
def get_file_content(params, target_directory):
directory_content = params.get('directory_content', [])
for content in directory_content:
target_path = os.path.join(target_directory, content.get('subdir', ''))
try:
os.makedirs(target_path)
except OSError:
pass
if content.get('file_source', {}).get('file_source_selector') == 'URL':
(filename, headers) = urlretrieve(content.get('file_source', {}).get('file_URL'))
try:
bname = headers['Content-Disposition']
except KeyError:
bname = os.path.basename(urlsplit(content.get('file_source', {}).get('file_URL')).path)
else:
filename = content.get('file_source', {}).get('file_history')
bname = os.path.basename(filename)
file_action = content.get('file_action', {}).get('file_action_selector')
if file_action == 'unpack':
unpack_archive(filename, target_path)
else:
filename_override = content.get('file_action', {}).get('filename_override')
if filename_override:
target_path = os.path.join(target_path, filename_override)
else:
target_path = os.path.join(target_path, bname)
shutil.copyfile(filename, target_path)
return len(directory_content)
def main():
parser = optparse.OptionParser()
parser.add_option('', '--galaxy_data_manager_data_path', dest='galaxy_data_manager_data_path', default='', help='Root path for galaxy_data_manager_data_path')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fh:
params = json.loads(fh.read())
target_directory = params['output_data'][0]['extra_files_path']
data_table_entries = get_data_table_entries(params['param_dict'], options.galaxy_data_manager_data_path)
# save info to json file
with open(filename, 'w') as fh:
fh.write(json.dumps({"data_tables": data_table_entries}, sort_keys=True))
get_file_content(params['param_dict'], target_directory)
if __name__ == "__main__":
main()
|
mblue9/tools-iuc
|
data_managers/data_manager_manual/data_manager/data_manager_manual.py
|
Python
|
mit
| 7,932
|
[
"Galaxy"
] |
d72d20b9ae4f8239c56d6b719612cc5f69afc88f3698862e3e4312fcc63f0a26
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkDataSetReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkDataSetReader(), 'Reading vtkDataSet.',
(), ('vtkDataSet',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
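# --- Illustrative sketch (not part of DeVIDE) --------------------------------
# The module above only wraps vtk.vtkDataSetReader. For reference, the
# underlying VTK reader is typically driven as below; the file name is a
# hypothetical placeholder and the helper is never called here.
def _plain_vtk_usage_sketch(filename="data.vtk"):
    reader = vtk.vtkDataSetReader()
    reader.SetFileName(filename)   # legacy .vtk file to read
    reader.Update()                # execute the reader
    return reader.GetOutput()      # a vtkDataSet subclass (e.g. vtkPolyData)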
|
nagyistoce/devide
|
modules/vtk_basic/vtkDataSetReader.py
|
Python
|
bsd-3-clause
| 480
|
[
"VTK"
] |
1206544f7faad7a5b82e25a0da6c8401a323dad7a4b0e7f0cbaa8ffed0527b6e
|
from graphs import Graph
"""
Find a sequence of moves that allows the knight to visit every square on the board exactly once.
"""
def knightGraph(bdsize):
ktGraph = Graph()
for row in range(bdsize):
for col in range(bdsize):
nodeId = posToNodeId(row, col, bdsize)
newPositions = genLegalMoves(row, col, bdsize)
for e in newPositions:
nid = posToNodeId(e[0], e[1], bdsize)
ktGraph.addEdge(nodeId, nid)
return ktGraph
def posToNodeId(row, column, board_size):
return (row*board_size) + column
def genLegalMoves(x, y, bdsize):
newMoves = []
moveOffsets = [
(-1,-2),(-1,2),(-2,-1),(-2,1),
( 1,-2),( 1,2),( 2,-1),( 2,1)
]
for i in moveOffsets:
newX = x + i[0]
newY = y + i[1]
if legalCoord(newX, bdsize) and legalCoord(newY, bdsize):
newMoves.append((newX, newY))
return newMoves
def legalCoord(x, bdsize):
    return 0 <= x < bdsize
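# --- Illustrative usage (not part of the original module) --------------------
# Quick sanity check of the move generator: from corner square (0, 0) of an
# 8x8 board only two knight moves are legal. Guarded so nothing runs on import.
if __name__ == "__main__":
    print(genLegalMoves(0, 0, 8))   # -> [(1, 2), (2, 1)]
    print(posToNodeId(4, 4, 5))     # node id of the last square on a 5x5 board -> 24
    knightGraph(5)                  # full move graph for a 5x5 board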
|
vector8188/AlgorithmAnalysisPython
|
knightsTour.py
|
Python
|
mit
| 1,047
|
[
"VisIt"
] |
cc7245708eb4d33458fac5e10bcc5dd92e0c63708f1bfe1807e122118adb5c1a
|
# encoding: utf-8
"""Module for (annotated) unit housekeeping; see ``ModuleSIRegister``."""
from __future__ import division
import string
import si.math
from si.units import SIUnit, SICompoundUnit
class ModuleSIRegister(object):
"""In a module, create a ``_register = ModuleSIRegister(locals())`` and add define units like ``_register.register(s**-1, "Hz", "herz", prefixes = list('YZEPTGMk'))``. Units will be made available for inclusion with import automatically, and prefixing is handled."""
loadedmodules = [] # keep a list in the class
def __init__(self, locals):
self.units = []
self.locals = locals
self.loadedmodules.append(self)
self._prefix = False
@staticmethod
def _stringlist(o):
if isinstance(o, basestring): return [o]
if hasattr(o, "__len__"):
return o
else:
return [o]
def register(self, unit, symbol, name, description = None, prefixes = None, map = "exact"):
"""A wrapper around SIUnit to add a unit with symbols and names (can be string/unicode or list thereof; symbols can contain TeX representations like r"$\sigma$").
prefixes is a list of SI prefix names common for that unit (or True for all, or one of ``["kg","m2","m3"]`` for magic handling). map defines if and how the unit should be used to give names to SI quantities ("always" for normal use, "exact" to match only scalar multiples of the unit, "never") """
symbol = self._stringlist(symbol)
name = self._stringlist(name)
ru = SIUnit(unit, symbol, name, description, prefixes, map)
self.units.append(ru)
for n in ru.get_python_names():
self.locals[n] = unit
if self._prefix:
for n, pu in ru.get_prefixed():
self.locals[n] = pu
def prefix(self):
"""Include all prefixed forms of registered SI objects in the namespace and do so for all registered in future."""
self._prefix = True
for ru in self.units:
for n, pu in ru.get_prefixed():
self.locals[n] = pu
def search(q):
"""Search loaded modules for a quantity exactly matching the search term q.
>>> from si.common import *
>>> search("u")
<SIUnit: 'Dalton' ('u')>"""
result = []
for m in ModuleSIRegister.loadedmodules:
for u in m.units:
if q in u.symbol or q in u.name:
result.append(u)
if not result: raise LookupError("No matching unit.")
assert len(result)==1, "Multiple units match that name." # should not occur with shipped modules
return result[0]
def search_prefixed(q):
"""Like ``search``, but strip prefixes. Return a tuple of the prefix factor and the found unit.
>>> import si.common # load unit definitions
>>> search_prefixed("Gg") # one giga-gram
(1000000, <SIUnit: 'kilogram' ('kg')>)
"""
import si.prefixes
q = q.replace(u"μ","u").replace(u"µ","u") # FIXME
factor = 1
stripped = q
for p,f in vars(si.prefixes).iteritems():
if q.startswith(p):
assert factor == 1, "Multiple prefixes match that name." # should not occur with shipped modules.
factor = f
stripped = q[len(p):]
# kg needs very special handling, unfortunately.
if stripped == "g":
return si.math.truediv(factor,1000), search("kg")
try:
unit = search(stripped)
except: # maybe a prefix should not have been stripped
return (1, search(q))
if unit.prefixes == "m2": factor = si.math.pow(factor, 2) # magic prefix handling!
elif unit.prefixes == "m3": factor = si.math.pow(factor, 3)
return factor, unit
def si_from_string(s):
"""Convert a string to a SI quantity.
>>> import si.common # load unit definitions
>>> print si_from_string("5S/cm^2")
50000 S/m^2
>>> print si_from_string("5 J/(m*mol)")
5 N/mol
>>> print si_from_string("50 WbkA^2")
50000000 A J
>>> print si_from_string("kHz")
1000 Hz
#>>> print si_from_string("mm") # fail with sympy
#0.001 m
#>>> print si_from_string("degree") # fail with python maths
#(1/180)*pi
"""
lastnumber = 0
while s[lastnumber] in string.digits+"./": lastnumber+=1
number, unit = s[:lastnumber].strip(),s[lastnumber:].strip()
if not number: number = "1"
decomp = SICompoundUnit(unit)
result = decomp.to_unit()
result = result * si.math.nonint(number)
return result
|
chrysn/si
|
si/register.py
|
Python
|
bsd-2-clause
| 4,102
|
[
"Dalton"
] |
6e1513f8ac6204a25a02586d4c615eb24cae7b81ce35b0226d672874851a94d9
|
# -*- coding: utf-8 -*-
tex_nine_maths_cache = [
{"word": r"aleph", "menu": "ℵ"},
{"word": r"alpha", "menu": "α"},
{"word": r"amalg", "menu": "⨿"},
{"word": r"angle", "menu": "∠"},
{"word": r"approx", "menu": "≈"},
{"word": r"ast", "menu": "∗"},
{"word": r"asymp", "menu": "≍"},
{"word": r"backslash", "menu": "\\"},
{"word": r"beta", "menu": "β"},
{"word": r"bigcap", "menu": "∩"},
{"word": r"bigcirc", "menu": "◦"},
{"word": r"bigcup", "menu": "∪"},
{"word": r"bigodot", "menu": "⊙"},
{"word": r"bigoplus", "menu": "⊕"},
{"word": r"bigotimes", "menu": "⊗"},
{"word": r"bigsqcup", "menu": "⊔"},
{"word": r"bigtriangledown", "menu": "▽"},
{"word": r"bigtriangleup", "menu": "△"},
{"word": r"biguplus", "menu": "⊎"},
{"word": r"bigvee", "menu": "∨"},
{"word": r"bigwedge", "menu": "∧"},
{"word": r"bot", "menu": "⊥"},
{"word": r"bowtie", "menu": "▷◁"},
{"word": r"Box", "menu": "□"},
{"word": r"bullet", "menu": "•"},
{"word": r"cap", "menu": "∩"},
{"word": r"cdot", "menu": "·"},
{"word": r"cdots", "menu": "···"},
{"word": r"chi", "menu": "χ"},
{"word": r"circ", "menu": "◦"},
{"word": r"clubsuit", "menu": "♣"},
{"word": r"cong", "menu": "≅"},
{"word": r"coprod", "menu": "⨿"},
{"word": r"cup", "menu": "∪"},
{"word": r"dagger", "menu": "†"},
{"word": r"dashv", "menu": "⊣"},
{"word": r"ddagger", "menu": "‡"},
{"word": r"ddots", "menu": "..."},
{"word": r"Delta", "menu": "∆"},
{"word": r"delta", "menu": "δ"},
{"word": r"diamond", "menu": "⋄"},
{"word": r"Diamond", "menu": "♢"},
{"word": r"diamondsuit", "menu": "♢"},
{"word": r"div", "menu": "÷"},
{"word": r"doteq", "menu": "≐"},
{"word": r"downarrow", "menu": "↓"},
{"word": r"Downarrow", "menu": "⇓"},
{"word": r"ell", "menu": "ℓ"},
{"word": r"emptyset", "menu": "∅"},
{"word": r"epsilon", "menu": "ϵ"},
{"word": r"equiv", "menu": "≡"},
{"word": r"eta", "menu": "η"},
{"word": r"exists", "menu": "∃"},
{"word": r"flat", "menu": "♭"},
{"word": r"forall", "menu": "∀"},
{"word": r"frown", "menu": "⌢"},
{"word": r"gamma", "menu": "γ"},
{"word": r"Gamma", "menu": "Γ"},
{"word": r"geq", "menu": "≥"},
{"word": r"gg", "menu": "≫"},
{"word": r"hbar", "menu": "ħ"},
{"word": r"heartsuit", "menu": "♡"},
{"word": r"hookleftarrow", "menu": "↩"},
{"word": r"hookrightarrow", "menu": "↪"},
{"word": r"imath", "menu": "ı"},
{"word": r"Im", "menu": "ℑ"},
{"word": r"infty", "menu": "∞"},
{"word": r"in", "menu": "∈"},
{"word": r"int", "menu": "∫"},
{"word": r"iota", "menu": "ι"},
{"word": r"jmath", "menu": "ȷ"},
{"word": r"kappa", "menu": "κ"},
{"word": r"lambda", "menu": "λ"},
{"word": r"Lambda", "menu": "Λ"},
{"word": r"langle", "menu": "〈"},
{"word": r"ldots", "menu": "..."},
{"word": r"leadsto", "menu": "⇝"},
{"word": r"leftarrow", "menu": "←"},
{"word": r"Leftarrow", "menu": "⇐"},
{"word": r"leftharpoondown", "menu": "↽"},
{"word": r"leftharpoonup", "menu": "↼"},
{"word": r"leftrightarrow", "menu": "↔"},
{"word": r"Leftrightarrow", "menu": "⇔"},
{"word": r"leq", "menu": "≤"},
{"word": r"lhd", "menu": "◁"},
{"word": r"ll", "menu": "≪"},
{"word": r"longleftarrow", "menu": "←−"},
{"word": r"Longleftarrow", "menu": "⇐="},
{"word": r"longleftrightarrow", "menu": "←→"},
{"word": r"Longleftrightarrow", "menu": "⇐⇒"},
{"word": r"longmapsto", "menu": "−→"},
{"word": r"longrightarrow", "menu": "−→"},
{"word": r"Longrightarrow", "menu": "=⇒"},
{"word": r"mapsto", "menu": "↦"},
{"word": r"|", "menu": "∥"},
{"word": r"mho", "menu": "℧"},
{"word": r"mid", "menu": "|"},
{"word": r"models", "menu": "⊧"},
{"word": r"mp", "menu": "∓"},
{"word": r"mu", "menu": "μ"},
{"word": r"nabla", "menu": "∇"},
{"word": r"natural", "menu": "♮"},
{"word": r"nearrow", "menu": "↗"},
{"word": r"neg", "menu": "¬"},
{"word": r"neq", "menu": "≠"},
{"word": r"ni", "menu": "∋"},
{"word": r"nu", "menu": "ν"},
{"word": r"nwarrow", "menu": "↖"},
{"word": r"odot", "menu": "⊙"},
{"word": r"oint", "menu": "∮"},
{"word": r"omega", "menu": "ω"},
{"word": r"Omega", "menu": "Ω"},
{"word": r"ominus", "menu": "⊖"},
{"word": r"oplus", "menu": "⊕"},
{"word": r"oslash", "menu": "⊘"},
{"word": r"otimes", "menu": "⊗"},
{"word": r"parallel", "menu": "∥"},
{"word": r"partial", "menu": "∂"},
{"word": r"perp", "menu": "⊥"},
{"word": r"Phi", "menu": "Φ"},
{"word": r"phi", "menu": "ϕ"},
{"word": r"pi", "menu": "π"},
{"word": r"Pi", "menu": "Π"},
{"word": r"pm", "menu": "±"},
{"word": r"preceq", "menu": "⪯"},
{"word": r"prec", "menu": "≺"},
{"word": r"prime", "menu": "′"},
{"word": r"prod", "menu": "∏"},
{"word": r"propto", "menu": "∝"},
{"word": r"psi", "menu": "ψ"},
{"word": r"Psi", "menu": "Ψ"},
{"word": r"rangle", "menu": "〉"},
{"word": r"Re", "menu": "ℜ"},
{"word": r"rhd", "menu": "▷"},
{"word": r"rho", "menu": "ρ"},
{"word": r"rightarrow", "menu": "→"},
{"word": r"Rightarrow", "menu": "⇒"},
{"word": r"rightharpoondown", "menu": "⇁"},
{"word": r"rightharpoonup", "menu": "⇀"},
{"word": r"rightleftharpoons", "menu": "⇌"},
{"word": r"searrow", "menu": "↘"},
{"word": r"setminus", "menu": "\\"},
{"word": r"sharp", "menu": "♯"},
{"word": r"sigma", "menu": "σ"},
{"word": r"Sigma", "menu": "Σ"},
{"word": r"simeq", "menu": "≃"},
{"word": r"sim", "menu": "∼"},
{"word": r"smile", "menu": "⌣"},
{"word": r"spadesuit", "menu": "♠"},
{"word": r"sqcap", "menu": "⊓"},
{"word": r"sqcup", "menu": "⊔"},
{"word": r"sqsubseteq", "menu": "⊑"},
{"word": r"sqsubset", "menu": "⊏"},
{"word": r"sqsupseteq", "menu": "⊒"},
{"word": r"sqsupset", "menu": "⊐"},
{"word": r"star", "menu": "⋆"},
{"word": r"subseteq", "menu": "⊆"},
{"word": r"subset", "menu": "⊂"},
{"word": r"succeq", "menu": "⪰"},
{"word": r"succ", "menu": "≻"},
{"word": r"sum", "menu": "∑"},
{"word": r"supseteq", "menu": "⊇"},
{"word": r"supset", "menu": "⊃"},
{"word": r"surd", "menu": "√"},
{"word": r"swarrow", "menu": "↙"},
{"word": r"tau", "menu": "τ"},
{"word": r"theta", "menu": "θ"},
{"word": r"Theta", "menu": "Θ"},
{"word": r"times", "menu": "×"},
{"word": r"top", "menu": "⊤"},
{"word": r"triangleleft", "menu": "◁"},
{"word": r"triangle", "menu": "△"},
{"word": r"triangleright", "menu": "▷"},
{"word": r"unlhd", "menu": "⊴"},
{"word": r"unrhd", "menu": "⊵"},
{"word": r"uparrow", "menu": "↑"},
{"word": r"Uparrow", "menu": "⇑"},
{"word": r"updownarrow", "menu": "↕"},
{"word": r"Updownarrow", "menu": "⇕"},
{"word": r"uplus", "menu": "⊎"},
{"word": r"upsilon", "menu": "υ"},
{"word": r"Upsilon", "menu": "Υ"},
{"word": r"varepsilon", "menu": "ε"},
{"word": r"varphi", "menu": "φ"},
{"word": r"varpi", "menu": "ϖ"},
{"word": r"varrho", "menu": "ϱ"},
{"word": r"varsigma", "menu": "ς"},
{"word": r"vartheta", "menu": "ϑ"},
{"word": r"vdash", "menu": "⊢"},
{"word": r"vdots", "menu": "..."},
{"word": r"vee", "menu": "∨"},
{"word": r"wedge", "menu": "∧"},
{"word": r"wp", "menu": "℘"},
{"word": r"wr", "menu": "≀"},
{"word": r"xi", "menu": "ξ"},
{"word": r"Xi", "menu": "Ξ"},
{"word": r"zeta", "menu": "ζ"}
]
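# --- Illustrative lookup (not part of TeX-9) ----------------------------------
# The cache above is a flat list of {"word", "menu"} pairs; building a dict
# gives O(1) symbol lookup. Guarded so nothing runs on import.
if __name__ == "__main__":
    symbol_by_word = {entry["word"]: entry["menu"] for entry in tex_nine_maths_cache}
    print(symbol_by_word["alpha"], symbol_by_word["Rightarrow"])   # α ⇒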
|
vim-scripts/TeX-9
|
ftplugin/tex_nine/tex_nine_symbols.py
|
Python
|
gpl-3.0
| 10,868
|
[
"Bowtie"
] |
b8a92e047d4ba439534ec81bb80b395f69a64b72ed8dcd2332cb5c0586bd5409
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.en import article, referenced
from pattern.en import pluralize, singularize
from pattern.en import comparative, superlative
from pattern.en import conjugate, lemma, lexeme, tenses
from pattern.en import NOUN, VERB, ADJECTIVE
# The en module has a range of tools for word inflection:
# guessing the indefinite article of a word (a/an?),
# pluralization and singularization, comparative and superlative adjectives, verb conjugation.
# INDEFINITE ARTICLE
# ------------------
# The article() function returns the indefinite article (a/an) for a given noun.
# The definite article is always "the". The plural indefinite is "some".
print(article("bear") + " bear")
print("")
# The referenced() function returns a string with article() prepended to the given word.
# The referenced() function is non-trivial, as demonstrated with the exception words below:
for word in ["hour", "one-liner", "European", "university", "owl", "yclept", "year"]:
print(referenced(word))
print("")
# PLURALIZATION
# -------------
# The pluralize() function returns the plural form of a singular noun (or adjective).
# The algorithm is robust and handles about 98% of exceptions correctly:
for word in ["part-of-speech", "child", "dog's", "wolf", "bear", "kitchen knife"]:
print(pluralize(word))
print(pluralize("octopus", classical=True))
print(pluralize("matrix", classical=True))
print(pluralize("matrix", classical=False))
print(pluralize("my", pos=ADJECTIVE))
print("")
# SINGULARIZATION
# ---------------
# The singularize() function returns the singular form of a plural noun (or adjective).
# It is slightly less robust than the pluralize() function.
for word in ["parts-of-speech", "children", "dogs'", "wolves", "bears", "kitchen knives",
"octopodes", "matrices", "matrixes"]:
print(singularize(word))
print(singularize("our", pos=ADJECTIVE))
print("")
# COMPARATIVE & SUPERLATIVE ADJECTIVES
# ------------------------------------
# The comparative() and superlative() functions give the comparative/superlative form of an adjective.
# Words with three or more syllables are simply preceded by "more" or "most".
for word in ["gentle", "big", "pretty", "hurt", "important", "bad"]:
print("%s => %s => %s" % (word, comparative(word), superlative(word)))
print("")
# VERB CONJUGATION
# ----------------
# The lexeme() function returns a list of all possible verb inflections.
# The lemma() function returns the base form (infinitive) of a verb.
print("lexeme: %s" % lexeme("be"))
print("lemma: %s" % lemma("was"))
print("")
# The conjugate() function inflects a verb to another tense.
# You can supply:
# - tense : INFINITIVE, PRESENT, PAST,
# - person: 1, 2, 3 or None,
# - number: SINGULAR, PLURAL,
# - mood : INDICATIVE, IMPERATIVE,
# - aspect: IMPERFECTIVE, PROGRESSIVE.
# The tense can also be given as an abbreviated alias, e.g.,
# inf, 1sg, 2sg, 3sg, pl, part, 1sgp, 2sgp, 3sgp, ppl, ppart.
from pattern.en import PRESENT, SINGULAR
print(conjugate("being", tense=PRESENT, person=1, number=SINGULAR, negated=False))
print(conjugate("being", tense="1sg", negated=False))
print("")
# Prefer the full constants for code that will be reused/shared.
# The tenses() function returns a list of all tenses for the given verb form.
# Each tense is a tuple of (tense, person, number, mood, aspect).
# For example: tenses("are") => [('present', 2, 'plural', 'indicative', 'imperfective'), ...]
# You can then check if a tense constant is in the list.
# This will also work with aliases, even though they are not explicitly in the list.
from pattern.en import PRESENT, PLURAL
print(tenses("are"))
print((PRESENT, 1, PLURAL) in tenses("are"))
print("pl" in tenses("are"))
|
clips/pattern
|
examples/03-en/01-inflect.py
|
Python
|
bsd-3-clause
| 3,915
|
[
"Octopus"
] |
e62ff2fbff3b7f3aeac10d7db9657ebe84cdbca737c7f245fb8d8c817cfd0094
|
#
# @file TestUnitKind.py
# @brief UnitKind enumeration unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestUnitKind.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestUnitKind(unittest.TestCase):
def test_UnitKind_equals(self):
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_AMPERE,libsbml.UNIT_KIND_AMPERE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_INVALID,libsbml.UNIT_KIND_INVALID) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITER,libsbml.UNIT_KIND_LITER) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITRE,libsbml.UNIT_KIND_LITRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METER,libsbml.UNIT_KIND_METER) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METRE,libsbml.UNIT_KIND_METRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITER,libsbml.UNIT_KIND_LITRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_LITRE,libsbml.UNIT_KIND_LITER) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METER,libsbml.UNIT_KIND_METRE) )
self.assertEqual( 1, libsbml.UnitKind_equals(libsbml.UNIT_KIND_METRE,libsbml.UNIT_KIND_METER) )
self.assertEqual( 0, libsbml.UnitKind_equals(libsbml.UNIT_KIND_AMPERE,libsbml.UNIT_KIND_WEBER) )
pass
def test_UnitKind_forName(self):
self.assert_( libsbml.UnitKind_forName("ampere") == libsbml.UNIT_KIND_AMPERE )
self.assert_( libsbml.UnitKind_forName("becquerel") == libsbml.UNIT_KIND_BECQUEREL )
self.assert_( libsbml.UnitKind_forName("candela") == libsbml.UNIT_KIND_CANDELA )
self.assert_( libsbml.UnitKind_forName("Celsius") == libsbml.UNIT_KIND_CELSIUS )
self.assert_( libsbml.UnitKind_forName("coulomb") == libsbml.UNIT_KIND_COULOMB )
self.assert_( libsbml.UnitKind_forName("dimensionless") == libsbml.UNIT_KIND_DIMENSIONLESS )
self.assert_( libsbml.UnitKind_forName("farad") == libsbml.UNIT_KIND_FARAD )
self.assert_( libsbml.UnitKind_forName("gram") == libsbml.UNIT_KIND_GRAM )
self.assert_( libsbml.UnitKind_forName("gray") == libsbml.UNIT_KIND_GRAY )
self.assert_( libsbml.UnitKind_forName("henry") == libsbml.UNIT_KIND_HENRY )
self.assert_( libsbml.UnitKind_forName("hertz") == libsbml.UNIT_KIND_HERTZ )
self.assert_( libsbml.UnitKind_forName("item") == libsbml.UNIT_KIND_ITEM )
self.assert_( libsbml.UnitKind_forName("joule") == libsbml.UNIT_KIND_JOULE )
self.assert_( libsbml.UnitKind_forName("katal") == libsbml.UNIT_KIND_KATAL )
self.assert_( libsbml.UnitKind_forName("kelvin") == libsbml.UNIT_KIND_KELVIN )
self.assert_( libsbml.UnitKind_forName("kilogram") == libsbml.UNIT_KIND_KILOGRAM )
self.assert_( libsbml.UnitKind_forName("liter") == libsbml.UNIT_KIND_LITER )
self.assert_( libsbml.UnitKind_forName("litre") == libsbml.UNIT_KIND_LITRE )
self.assert_( libsbml.UnitKind_forName("lumen") == libsbml.UNIT_KIND_LUMEN )
self.assert_( libsbml.UnitKind_forName("lux") == libsbml.UNIT_KIND_LUX )
self.assert_( libsbml.UnitKind_forName("meter") == libsbml.UNIT_KIND_METER )
self.assert_( libsbml.UnitKind_forName("metre") == libsbml.UNIT_KIND_METRE )
self.assert_( libsbml.UnitKind_forName("mole") == libsbml.UNIT_KIND_MOLE )
self.assert_( libsbml.UnitKind_forName("newton") == libsbml.UNIT_KIND_NEWTON )
self.assert_( libsbml.UnitKind_forName("ohm") == libsbml.UNIT_KIND_OHM )
self.assert_( libsbml.UnitKind_forName("pascal") == libsbml.UNIT_KIND_PASCAL )
self.assert_( libsbml.UnitKind_forName("radian") == libsbml.UNIT_KIND_RADIAN )
self.assert_( libsbml.UnitKind_forName("second") == libsbml.UNIT_KIND_SECOND )
self.assert_( libsbml.UnitKind_forName("siemens") == libsbml.UNIT_KIND_SIEMENS )
self.assert_( libsbml.UnitKind_forName("sievert") == libsbml.UNIT_KIND_SIEVERT )
self.assert_( libsbml.UnitKind_forName("steradian") == libsbml.UNIT_KIND_STERADIAN )
self.assert_( libsbml.UnitKind_forName("tesla") == libsbml.UNIT_KIND_TESLA )
self.assert_( libsbml.UnitKind_forName("volt") == libsbml.UNIT_KIND_VOLT )
self.assert_( libsbml.UnitKind_forName("watt") == libsbml.UNIT_KIND_WATT )
self.assert_( libsbml.UnitKind_forName("weber") == libsbml.UNIT_KIND_WEBER )
self.assert_( libsbml.UnitKind_forName(None) == libsbml.UNIT_KIND_INVALID )
self.assert_( libsbml.UnitKind_forName("") == libsbml.UNIT_KIND_INVALID )
self.assert_( libsbml.UnitKind_forName("foobar") == libsbml.UNIT_KIND_INVALID )
pass
def test_UnitKind_isValidUnitKindString(self):
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("fun-foam-unit for kids!",1,1) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("litre",2,2) )
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("liter",2,2) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("liter",1,2) )
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("meter",2,3) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("metre",2,1) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("meter",1,2) )
self.assertEqual( 1, libsbml.UnitKind_isValidUnitKindString("Celsius",2,1) )
self.assertEqual( 0, libsbml.UnitKind_isValidUnitKindString("Celsius",2,2) )
pass
def test_UnitKind_toString(self):
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_AMPERE)
self.assert_(( "ampere" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_BECQUEREL)
self.assert_(( "becquerel" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_CANDELA)
self.assert_(( "candela" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_CELSIUS)
self.assert_(( "Celsius" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_COULOMB)
self.assert_(( "coulomb" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_DIMENSIONLESS)
self.assert_(( "dimensionless" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_FARAD)
self.assert_(( "farad" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_GRAM)
self.assert_(( "gram" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_GRAY)
self.assert_(( "gray" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_HENRY)
self.assert_(( "henry" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_HERTZ)
self.assert_(( "hertz" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_ITEM)
self.assert_(( "item" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_JOULE)
self.assert_(( "joule" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_KATAL)
self.assert_(( "katal" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_KELVIN)
self.assert_(( "kelvin" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_KILOGRAM)
self.assert_(( "kilogram" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LITER)
self.assert_(( "liter" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LITRE)
self.assert_(( "litre" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LUMEN)
self.assert_(( "lumen" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_LUX)
self.assert_(( "lux" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_METER)
self.assert_(( "meter" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_METRE)
self.assert_(( "metre" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_MOLE)
self.assert_(( "mole" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_NEWTON)
self.assert_(( "newton" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_OHM)
self.assert_(( "ohm" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_PASCAL)
self.assert_(( "pascal" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_RADIAN)
self.assert_(( "radian" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_SECOND)
self.assert_(( "second" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_SIEMENS)
self.assert_(( "siemens" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_SIEVERT)
self.assert_(( "sievert" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_STERADIAN)
self.assert_(( "steradian" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_TESLA)
self.assert_(( "tesla" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_VOLT)
self.assert_(( "volt" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_WATT)
self.assert_(( "watt" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_WEBER)
self.assert_(( "weber" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_INVALID)
self.assert_(( "(Invalid UnitKind)" == s ))
s = libsbml.UnitKind_toString(-1)
self.assert_(( "(Invalid UnitKind)" == s ))
s = libsbml.UnitKind_toString(libsbml.UNIT_KIND_INVALID + 1)
self.assert_(( "(Invalid UnitKind)" == s ))
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestUnitKind))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
TheCoSMoCompany/biopredyn
|
Prototype/src/libsbml-5.10.0/src/bindings/python/test/sbml/TestUnitKind.py
|
Python
|
bsd-3-clause
| 10,570
|
[
"VisIt"
] |
f09225b1c946823d7dd80db6c124e0de8529a79d3580b19f49e2b26f7eaf3180
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft = python sts = 4 ts = 4 sw = 4 et:
"""ANTS Apply Transforms interface
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from .base import ANTSCommand, ANTSCommandInputSpec
from ..base import (TraitedSpec, File, traits,
isdefined)
from ...utils.filemanip import split_filename
from nipype.interfaces.base import InputMultiPath
class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(4, 3, argstr='%d', usedefault=True,
desc='image dimension (3 or 4)', position=1)
input_image = File(argstr='%s', mandatory=True, copyfile=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'))
out_postfix = traits.Str('_wtsimt', argstr='%s', usedefault=True,
                             desc=('Postfix that is appended to all output '
'files (default = _wtsimt)'))
reference_image = File(argstr='-R %s', xor=['tightest_box'],
desc='reference image space that you wish to warp INTO')
tightest_box = traits.Bool(argstr='--tightest-bounding-box',
                               desc=('computes tightest bounding box (overridden by '
'reference_image if given)'),
xor=['reference_image'])
reslice_by_header = traits.Bool(argstr='--reslice-by-header',
desc=('Uses orientation matrix and origin encoded in '
'reference image file header. Not typically used '
'with additional transforms'))
use_nearest = traits.Bool(argstr='--use-NN',
desc='Use nearest neighbor interpolation')
use_bspline = traits.Bool(argstr='--use-Bspline',
desc='Use 3rd order B-Spline interpolation')
transformation_series = InputMultiPath(File(exists=True), argstr='%s',
desc='transformation file(s) to be applied',
mandatory=True, copyfile=False)
invert_affine = traits.List(traits.Int,
desc=('List of Affine transformations to invert. '
'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines '
'found in transformation_series'))
class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
class WarpTimeSeriesImageMultiTransform(ANTSCommand):
"""Warps a time-series from one space to another
Examples
--------
>>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform
>>> wtsimt = WarpTimeSeriesImageMultiTransform()
>>> wtsimt.inputs.input_image = 'resting.nii'
>>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz'
>>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
>>> wtsimt.cmdline
'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt'
"""
_cmd = 'WarpTimeSeriesImageMultiTransform'
input_spec = WarpTimeSeriesImageMultiTransformInputSpec
output_spec = WarpTimeSeriesImageMultiTransformOutputSpec
def _format_arg(self, opt, spec, val):
if opt == 'out_postfix':
_, name, ext = split_filename(
os.path.abspath(self.inputs.input_image))
return name + val + ext
if opt == 'transformation_series':
series = []
affine_counter = 0
for transformation in val:
if 'Affine' in transformation and \
isdefined(self.inputs.invert_affine):
affine_counter += 1
if affine_counter in self.inputs.invert_affine:
                        series += ['-i']
series += [transformation]
return ' '.join(series)
return super(WarpTimeSeriesImageMultiTransform, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(os.path.abspath(self.inputs.input_image))
outputs['output_image'] = os.path.join(os.getcwd(),
''.join((name,
self.inputs.out_postfix,
ext)))
return outputs
def _run_interface(self, runtime):
runtime = super(WarpTimeSeriesImageMultiTransform, self)._run_interface(runtime, correct_return_codes = [0,1])
if "100 % complete" not in runtime.stdout:
self.raise_exception(runtime)
return runtime
class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(3, 2, argstr='%d', usedefault=True,
desc='image dimension (2 or 3)', position=1)
input_image = File(argstr='%s', mandatory=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'), position=2)
output_image = File(genfile=True, hash_files=False, argstr='%s',
desc=('name of the output warped image'), position=3,
xor=['out_postfix'])
out_postfix = traits.Str("_wimt", usedefault=True, hash_files=False,
                             desc=('Postfix that is appended to all output '
'files (default = _wimt)'), xor=['output_image'])
reference_image = File(argstr='-R %s', xor=['tightest_box'],
desc='reference image space that you wish to warp INTO')
tightest_box = traits.Bool(argstr='--tightest-bounding-box',
                               desc=('computes tightest bounding box (overridden by '
'reference_image if given)'),
xor=['reference_image'])
reslice_by_header = traits.Bool(argstr='--reslice-by-header',
desc=('Uses orientation matrix and origin encoded in '
'reference image file header. Not typically used '
'with additional transforms'))
use_nearest = traits.Bool(argstr='--use-NN',
desc='Use nearest neighbor interpolation')
use_bspline = traits.Bool(argstr='--use-Bspline',
desc='Use 3rd order B-Spline interpolation')
transformation_series = InputMultiPath(File(exists=True), argstr='%s',
desc='transformation file(s) to be applied',
mandatory=True)
invert_affine = traits.List(traits.Int,
desc=('List of Affine transformations to invert.'
'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines '
'found in transformation_series. Note that indexing '
'starts with 1 and does not include warp fields. Affine '
'transformations are distinguished '
'from warp fields by the word "affine" included in their filenames.'))
class WarpImageMultiTransformOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
class WarpImageMultiTransform(ANTSCommand):
"""Warps an image from one space to another
Examples
--------
>>> from nipype.interfaces.ants import WarpImageMultiTransform
>>> wimt = WarpImageMultiTransform()
>>> wimt.inputs.input_image = 'structural.nii'
>>> wimt.inputs.reference_image = 'ants_deformed.nii.gz'
>>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt']
>>> wimt.cmdline
'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt'
>>> wimt = WarpImageMultiTransform()
>>> wimt.inputs.input_image = 'diffusion_weighted.nii'
>>> wimt.inputs.reference_image = 'functional.nii'
>>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz','dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt']
>>> wimt.inputs.invert_affine = [1]
>>> wimt.cmdline
'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii -i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt'
"""
_cmd = 'WarpImageMultiTransform'
input_spec = WarpImageMultiTransformInputSpec
output_spec = WarpImageMultiTransformOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
_, name, ext = split_filename(
os.path.abspath(self.inputs.input_image))
return ''.join((name, self.inputs.out_postfix, ext))
return None
def _format_arg(self, opt, spec, val):
if opt == 'transformation_series':
series = []
affine_counter = 0
for transformation in val:
if "affine" in transformation.lower() and \
isdefined(self.inputs.invert_affine):
affine_counter += 1
if affine_counter in self.inputs.invert_affine:
series += '-i',
series += [transformation]
return ' '.join(series)
return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.output_image):
outputs['output_image'] = os.path.abspath(self.inputs.output_image)
else:
outputs['output_image'] = os.path.abspath(
self._gen_filename('output_image'))
return outputs
class ApplyTransformsInputSpec(ANTSCommandInputSpec):
dimension = traits.Enum(2, 3, 4, argstr='--dimensionality %d',
desc=('This option forces the image to be treated '
'as a specified-dimensional image. If not '
'specified, antsWarp tries to infer the '
'dimensionality from the input image.'))
input_image_type = traits.Enum(0, 1, 2, 3,
argstr='--input-image-type %d',
desc=('Option specifying the input image '
'type of scalar (default), vector, '
'tensor, or time series.'))
input_image = File(argstr='--input %s', mandatory=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'),
exists=True)
output_image = traits.Str(argstr='--output %s',
desc=('output file name'), genfile=True,
hash_file=False, xor=['out_postfix'])
out_postfix = traits.Str('_trans', usedefault=True, hash_files=False,
desc=('Postfix that is appended to all output'
' files (default = _trans)'),
xor=['output_image'])
reference_image = File(argstr='--reference-image %s', mandatory=True,
desc='reference image space that you wish to warp INTO',
exists=True)
interpolation = traits.Enum('Linear',
'NearestNeighbor',
'CosineWindowedSinc',
'WelchWindowedSinc',
'HammingWindowedSinc',
'LanczosWindowedSinc',
'MultiLabel',
'Gaussian',
'BSpline',
argstr='%s', usedefault=True)
# TODO: Implement these options for multilabel, gaussian, and bspline
# interpolation_sigma = traits.Float(requires=['interpolation'])
# interpolation_alpha = traits.Float(requires=['interpolation_sigma'])
# bspline_order = traits.Int(3, requires=['interpolation'])
transforms = traits.List(
File(exists=True), argstr='%s', mandatory=True, desc=(''))
invert_transform_flags = traits.List(traits.Bool())
default_value = traits.Float(
0.0, argstr='--default-value %d', usedefault=True)
print_out_composite_warp_file = traits.Enum(
0, 1, requires=["output_image"], desc=('')) # TODO: Change to boolean
class ApplyTransformsOutputSpec(TraitedSpec):
output_image = File(exists=True, desc='Warped image')
class ApplyTransforms(ANTSCommand):
"""ApplyTransforms, applied to an input image, transforms it according to a
reference image and a transform (or a set of transforms).
Examples
--------
>>> from nipype.interfaces.ants import ApplyTransforms
>>> at = ApplyTransforms()
>>> at.inputs.dimension = 3
>>> at.inputs.input_image = 'moving1.nii'
>>> at.inputs.reference_image = 'fixed1.nii'
>>> at.inputs.output_image = 'deformed_moving1.nii'
>>> at.inputs.interpolation = 'Linear'
>>> at.inputs.default_value = 0
>>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz']
>>> at.inputs.invert_transform_flags = [False, False]
>>> at.cmdline
'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii --transform [trans.mat,0] --transform [ants_Warp.nii.gz,0]'
"""
_cmd = 'antsApplyTransforms'
input_spec = ApplyTransformsInputSpec
output_spec = ApplyTransformsOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
output = self.inputs.output_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + self.inputs.out_postfix + ext
return output
return None
def _getTransformFileNames(self):
retval = []
for ii in range(len(self.inputs.transforms)):
if isdefined(self.inputs.invert_transform_flags):
if len(self.inputs.transforms) == len(self.inputs.invert_transform_flags):
invert_code = 1 if self.inputs.invert_transform_flags[
ii] else 0
retval.append("--transform [%s,%d]" %
(self.inputs.transforms[ii], invert_code))
else:
raise Exception("ERROR: The useInverse list must have the same number of entries as the transformsFileName list.")
else:
retval.append("--transform %s" % self.inputs.transforms[ii])
return " ".join(retval)
def _getOutputWarpedFileName(self):
if isdefined(self.inputs.print_out_composite_warp_file):
return "--output [%s,%s]" % (self._gen_filename("output_image"), self.inputs.print_out_composite_warp_file)
else:
return "--output %s" % (self._gen_filename("output_image"))
def _format_arg(self, opt, spec, val):
if opt == "output_image":
return self._getOutputWarpedFileName()
elif opt == "transforms":
return self._getTransformFileNames()
elif opt == 'interpolation':
# TODO: handle multilabel, gaussian, and bspline options
return '--interpolation %s' % self.inputs.interpolation
return super(ApplyTransforms, self)._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.abspath(
self._gen_filename('output_image'))
return outputs
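# --- Illustrative sketch (not part of nipype) ---------------------------------
# Standalone restatement of the flag formatting done in _getTransformFileNames()
# above: each transform becomes "--transform [file,0|1]", where 1 means the
# transform is inverted. The file names in the comment are hypothetical.
def _format_transform_args_sketch(transforms, invert_flags):
    return " ".join(
        "--transform [%s,%d]" % (fname, 1 if invert else 0)
        for fname, invert in zip(transforms, invert_flags))
# _format_transform_args_sketch(['trans.mat', 'ants_Warp.nii.gz'], [True, False])
# -> '--transform [trans.mat,1] --transform [ants_Warp.nii.gz,0]'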
|
FredLoney/nipype
|
nipype/interfaces/ants/resampling.py
|
Python
|
bsd-3-clause
| 16,542
|
[
"Gaussian"
] |
7d92f3c2ea8f20360960b6d54e4ac26ae376c06b01ba984ffdae6fb1d626f7e1
|
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import builtins
import sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder",
"StreamReader", "StreamWriter",
"StreamReaderWriter", "StreamRecoder",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"encode", "decode", "iterencode", "iterdecode",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"backslashreplace_errors", "namereplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
"""Codec details when looking up the codec registry"""
# Private API to allow Python 3.4 to denylist the known non-Unicode
# codecs in the standard library. A more general mechanism to
# reliably distinguish test encodings from other codecs will hopefully
# be defined for Python 3.5
#
# See http://bugs.python.org/issue19619
_is_text_encoding = True # Assume codecs are text encodings by default
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None,
*, _is_text_encoding=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
if _is_text_encoding is not None:
self._is_text_encoding = _is_text_encoding
return self
def __repr__(self):
return "<%s.%s object for encoding %s at %#x>" % \
(self.__class__.__module__, self.__class__.__qualname__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private code points U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences.
'namereplace' - Replace with \\N{...} escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamWriter for codecs which have to keep state in order to
make encoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamReader for codecs which have to keep state in order to
make decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
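# --- Illustrative sketch (not part of the codecs module) ----------------------
# Incremental decoders buffer incomplete byte sequences between calls. Feeding
# a two-byte UTF-8 character one byte at a time shows the effect; the helper is
# only a demonstration and is never called at import time.
def _demo_incremental_utf8_decode():
    decoder = lookup('utf-8').incrementaldecoder()
    first = decoder.decode(b'\xc3')               # incomplete sequence -> ''
    second = decoder.decode(b'\xa9', final=True)  # completes U+00E9 -> 'é'
    return first, second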
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
             'replace' - replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences.
'namereplace' - Replace with \\N{...} escape sequences.
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Resets the codec buffers used for keeping internal state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
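# Illustrative sketch (not part of the original module): a StreamWriter
# obtained from the codec registry wraps a binary stream so that str
# objects are encoded transparently on write().
def _example_stream_writer():
    import codecs, io
    raw = io.BytesIO()
    writer = codecs.getwriter("utf-8")(raw)
    writer.write("grüße\n")
    return raw.getvalue()                      # -> b'gr\xc3\xbc\xc3\x9fe\n'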
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
             'replace' - replace with a suitable replacement character
'backslashreplace' - Replace with backslashed escape sequences;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of decoded code points or bytes to
return. read() will never return more data than requested,
but it might return less, if there is not enough available.
size indicates the approximate maximum number of decoded
bytes or code points to read for decoding. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy, meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
if chars < 0:
# For compatibility with other read() methods that take a
# single argument
chars = size
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
if not data:
break
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as a list.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
            way to find the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping internal state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
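# Illustrative sketch (not part of the original module): line-oriented
# reading through a StreamReader obtained from the codec registry.
def _example_stream_reader():
    import codecs, io
    raw = io.BytesIO("première\nseconde\n".encode("utf-8"))
    reader = codecs.getreader("utf-8")(raw)
    return reader.readline()                   # -> 'première\n'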
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with StreamReaderWriter(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
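# Illustrative sketch (not part of the original module): a
# StreamReaderWriter combines the reader and writer of one codec around a
# single stream; this is the kind of object returned by codecs.open().
def _example_reader_writer():
    import codecs, io
    info = codecs.lookup("utf-8")
    srw = codecs.StreamReaderWriter(io.BytesIO(), info.streamreader,
                                    info.streamwriter)
    srw.write("naïve\n")
    srw.seek(0)
    return srw.read()                          # -> 'naïve\n'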
###
class StreamRecoder:
""" StreamRecoder instances translate data from one encoding to another.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the StreamRecoder is first decoded into an
intermediate format (depending on the "decode" codec) and then
written to the underlying stream using an instance of the provided
Writer class.
In the other direction, data is read from the underlying stream using
a Reader instance and then encoded and returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
data visible to .read() and .write()) while Reader and Writer
work on the backend (the data in stream).
You can use these objects to do transparent
transcodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode and decode must adhere to the Codec interface; Reader and
Writer must be factory functions or classes providing the
StreamReader and StreamWriter interfaces resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = b''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
# Seeks must be propagated to both the readers and writers
# as they might need to reset their internal buffers.
self.reader.seek(offset, whence)
self.writer.seek(offset, whence)
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='r', encoding=None, errors='strict', buffering=-1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Underlying encoded files are always opened in binary mode.
The default file mode is 'r', meaning to open the file in read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to -1 which means that the default buffer size will
be used.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
try:
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
except:
file.close()
raise
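# Illustrative sketch (not part of the original module): round-tripping a
# Latin-1 encoded file through the wrapped object returned by open().
# `path` is a caller-supplied scratch file name, used purely for example.
def _example_codecs_open(path):
    import codecs
    with codecs.open(path, "w", encoding="latin-1") as f:
        f.write("déjà vu")
    with codecs.open(path, "r", encoding="latin-1") as f:
        return f.read()                        # -> 'déjà vu'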
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Data written to the wrapped file is decoded according
to the given data_encoding and then encoded to the underlying
file using file_encoding. The intermediate data type
will usually be Unicode but depends on the specified codecs.
Bytes read from the file are decoded using file_encoding and then
passed back to the caller encoded using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
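# Illustrative sketch (not part of the original module): EncodedFile
# transcodes on the fly, so the caller writes UTF-8 bytes while the
# underlying stream receives Latin-1 bytes.
def _example_encoded_file():
    import codecs, io
    backing = io.BytesIO()
    ef = codecs.EncodedFile(backing, data_encoding="utf-8",
                            file_encoding="latin-1")
    ef.write("séance".encode("utf-8"))
    return backing.getvalue()                  # -> b's\xe9ance'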
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
    Decodes the input bytes objects from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
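# Illustrative sketch (not part of the original module): a round trip that
# chains iterencode() and iterdecode() over small string chunks.
def _example_iter_roundtrip():
    import codecs
    chunks = ["zürich ", "café"]
    encoded = list(codecs.iterencode(iter(chunks), "utf-8"))
    return "".join(codecs.iterdecode(iter(encoded), "utf-8"))   # 'zürich café'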
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \\u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
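# Illustrative sketch (not part of the original module): two byte values
# that decode to the same character make the generated encoding map mark
# that character as undefined (None), as described above.
def _example_encoding_map():
    import codecs
    decoding_map = codecs.make_identity_dict(range(3))
    decoding_map.update({1: "\u001a", 2: "\u001a"})
    encoding_map = codecs.make_encoding_map(decoding_map)
    return encoding_map["\u001a"]              # -> None (ambiguous mapping)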
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
namereplace_errors = lookup_error("namereplace")
except LookupError:
    # In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
namereplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
brython-dev/brython
|
www/src/Lib/codecs.py
|
Python
|
bsd-3-clause
| 36,672
|
[
"FEFF"
] |
49b4ee951cb4ffc1fcb6b5497888952191dd62337c03ec8475e954dbcad5f0f2
|
from framework.utils import OWTFLogger
from framework.dependency_management.dependency_resolver import ServiceLocator
"""
This plugin does not perform ANY test: The aim is to visit all URLs grabbed so far and build the transaction log to feed data to other plugins
NOTE: This is an active plugin because it may visit URLs retrieved by vulnerability scanner spiders which may be considered sensitive or include vulnerability probing
"""
DESCRIPTION = "Visit URLs found by other tools, some could be sensitive: need permission"
def run(PluginInfo):
#ServiceLocator.get_component("config").Show()
urls = ServiceLocator.get_component("url_manager").GetURLsToVisit()
for url in urls: # This will return only unvisited urls
ServiceLocator.get_component("requester").GetTransaction(True, url) # Use cache if possible
Content = str(len(urls))+" URLs were visited"
OWTFLogger.log(Content)
return ServiceLocator.get_component("plugin_helper").HtmlString(Content)
|
sharad1126/owtf
|
plugins/web/active/[email protected]
|
Python
|
bsd-3-clause
| 988
|
[
"VisIt"
] |
89351284615eb8bfd7d4a078b15d3d64e30b44aba17a7839026df8bcda12c2ff
|
import requests
import utils
CLIENT_ID = "dAhCaXVVvRdCjh7BcA"
CLIENT_SECRET = "JvuwZv9NPgV6E5MqzW8Lfb3FgjLpNFzK"
REPO_UUID = None
def get_auth_url():
return "https://bitbucket.org/site/oauth2/authorize?client_id={0}&response_type=code".format(CLIENT_ID)
def is_logged_in(repo_uuid):
try:
if repo_uuid is None:
raise Exception
return utils.in_db(repo_uuid)
# with open("bitbucket_access_token") as f:
# ACCESS_TOKEN = f.read()
# return True
except:
return False
def get_auth_token(request):
# headers = {"Content-type": "application/x-www-form-urlencoded"}
code = request.GET["code"]
data = {"code": code, "grant_type": "authorization_code"}
auth = (CLIENT_ID, CLIENT_SECRET)
try:
r = requests.post("https://bitbucket.org/site/oauth2/access_token", data=data, auth=auth)
if request.COOKIES.has_key('repo_uuid'):
repo_uuid = request.COOKIES['repo_uuid']
ACCESS_TOKEN = r.json()["access_token"]
utils.add_bitbucket_token_to_db(repo_uuid, ACCESS_TOKEN)
# with open('bitbucket_access_token', 'w') as f:
# f.write(ACCESS_TOKEN)
# print ACCESS_TOKEN
return r.json()
except:
return None
def verify(request):
# ensure we have a session state and the state value is the same as what Bitbucket returned
if 'code' not in request.GET:
return False
else:
return True
def get_request_headers(repo_uuid):
# with open("bitbucket_access_token") as f:
# ACCESS_TOKEN = f.read()
ACCESS_TOKEN = utils.get_token(repo_uuid, "Bitbucket")
headers = {"Authorization": "Bearer " + ACCESS_TOKEN}
return headers
def set_repo_uuid(repo_uuid):
global REPO_UUID # use global variable
REPO_UUID = repo_uuid
# Also save to file. This should reset each time you visit a new repo
# with open("repo_uuid", 'w') as f:
# f.write(REPO_UUID)
def get_repo_uuid():
with open("repo_uuid") as f:
UUID = f.read()
return UUID
def add_to_wiki(pages_list, repo_uuid):
content = "\n".join(pages_list)
data = {"content": content}
print "Adding to Wiki"
try:
r = requests.post("https://bitbucket.org/api/1.0/repositories/{1}/{0}/wiki/notes".format(repo_uuid, "{}"),
data=data)
print r.reason
    except:
        print "Oops"
        return False
if r.ok:
print "Added to Wiki!!"
return True
else:
return False
|
varunagrawal/ClassNotes
|
notes/bitbucket.py
|
Python
|
mit
| 2,614
|
[
"VisIt"
] |
bf68915fd66aed2f967f4d3210160389185c0bdae61a4e224c1ee3dcb068b52c
|
import numpy as np
from bayesnet.array.broadcast import broadcast_to
from bayesnet.math.exp import exp
from bayesnet.math.log import log
from bayesnet.math.sqrt import sqrt
from bayesnet.math.square import square
from bayesnet.random.random import RandomVariable
from bayesnet.tensor.constant import Constant
from bayesnet.tensor.tensor import Tensor
class GaussianMixture(RandomVariable):
"""
Mixture of the Gaussian distribution
p(x|w, mu, std)
= w_1 * N(x|mu_1, std_1) + ... + w_K * N(x|mu_K, std_K)
Parameters
----------
coef : tensor_like
        mixing coefficient whose sum along the specified axis should equal 1
mu : tensor_like
mean parameter along specified axis for each component
std : tensor_like
std parameter along specified axis for each component
axis : int
axis along which represents each component
data : tensor_like
realization
p : RandomVariable
original distribution of a model
"""
def __init__(self, coef, mu, std, axis=-1, data=None, p=None):
super().__init__(data, p)
assert axis == -1
self.axis = axis
self.coef, self.mu, self.std = self._check_input(coef, mu, std)
def _check_input(self, coef, mu, std):
coef = self._convert2tensor(coef)
mu = self._convert2tensor(mu)
std = self._convert2tensor(std)
if not coef.shape == mu.shape == std.shape:
shape = np.broadcast(coef.value, mu.value, std.value).shape
if coef.shape != shape:
coef = broadcast_to(coef, shape)
if mu.shape != shape:
mu = broadcast_to(mu, shape)
if std.shape != shape:
std = broadcast_to(std, shape)
self.n_component = coef.shape[self.axis]
return coef, mu, std
@property
def axis(self):
return self.parameter["axis"]
@axis.setter
def axis(self, axis):
if not isinstance(axis, int):
raise TypeError("axis must be int")
self.parameter["axis"] = axis
@property
def coef(self):
return self.parameter["coef"]
@coef.setter
def coef(self, coef):
self._atleast_ndim(coef, 1)
if (coef.value < 0).any():
raise ValueError("value of mixing coefficient must all be positive")
if not np.allclose(coef.value.sum(axis=self.axis), 1):
raise ValueError("sum of mixing coefficients must be 1")
self.parameter["coef"] = coef
@property
def mu(self):
return self.parameter["mu"]
@mu.setter
def mu(self, mu):
self.parameter["mu"] = mu
@property
def std(self):
return self.parameter["std"]
@std.setter
def std(self, std):
self._atleast_ndim(std, 1)
if (std.value < 0).any():
raise ValueError("value of std must all be positive")
self.parameter["std"] = std
@property
def var(self):
return square(self.parameter["std"])
def forward(self):
if self.coef.ndim != 1:
raise NotImplementedError
indices = np.array(
[np.random.choice(self.n_component, p=c) for c in self.coef.value]
)
output = np.random.normal(
loc=self.mu.value[indices],
scale=self.std.value[indices]
)
if (
isinstance(self.coef, Constant)
and isinstance(self.mu, Constant)
and isinstance(self.std, Constant)
):
return Constant(output)
return Tensor(output, function=self)
def backward(self):
raise NotImplementedError
def _pdf(self, x):
gauss = (
exp(-0.5 * square((x - self.mu) / self.std))
/ sqrt(2 * np.pi) / self.std
)
return (self.coef * gauss).sum(axis=self.axis)
def _log_pdf(self, x):
return log(self.pdf(x))
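# Illustrative NumPy-only sketch (not part of the original file): it
# evaluates the same density that _pdf above implements,
#     p(x) = sum_k coef_k * N(x | mu_k, std_k),
# for arbitrary example parameters of a two-component mixture.
def _example_mixture_density(x=0.0):
    coef = np.array([0.3, 0.7])
    mu = np.array([-2.0, 3.0])
    std = np.array([0.5, 1.0])
    gauss = np.exp(-0.5 * ((x - mu) / std) ** 2) / (np.sqrt(2 * np.pi) * std)
    return float((coef * gauss).sum(axis=-1))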
|
ctgk/BayesianNetwork
|
bayesnet/random/gaussian_mixture.py
|
Python
|
mit
| 3,939
|
[
"Gaussian"
] |
50a6f76e42b2a68c5f6b5fe7c3e75fa9b05a8525aec45fa609c23a8e8200f3be
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - feed some FCKeditor dialogues
@copyright: 2005-2006 Bastian Blank, Florian Festi, Thomas Waldmann
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import config, wikiutil
from MoinMoin.action.AttachFile import _get_files
from MoinMoin.Page import Page
import re
##############################################################################
### Macro dialog
##############################################################################
def macro_dialog(request):
help = get_macro_help(request)
request.write(
'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>Insert Macro</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta content="noindex,nofollow" name="robots">
<script src="%s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
<script language="javascript">
var oEditor = window.parent.InnerDialogLoaded() ;
var FCKLang = oEditor.FCKLang ;
var FCKMacros = oEditor.FCKMacros ;
window.onload = function ()
{
// First of all, translate the dialog box texts
oEditor.FCKLanguageManager.TranslatePage( document ) ;
OnChange( "BR" );
// Show the "Ok" button.
window.parent.SetOkButton( true ) ;
}
function Ok()
{
if ( document.getElementById('txtName').value.length == 0 )
{
alert( FCKLang.MacroErrNoName ) ;
return false ;
}
FCKMacros.Add( txtName.value ) ;
return true ;
}
function OnChange( sMacro )
{
// sMacro = GetE("txtName").value;
oHelp = GetE("help");
for (var i=0; i<oHelp.childNodes.length; i++)
{
var oDiv = oHelp.childNodes[i];
if (oDiv.nodeType==1)
{
// oDiv.style.display = (GetAttribute(oDiv, "id", "")==sMacro) ? '' : 'none';
if (GetAttribute(oDiv, "id", "") == sMacro)
{
oDiv.style.display = '' ;
// alert("enabled div id " + sMacro) ;
}
else
{
oDiv.style.display = 'none' ;
}
}
}
}
</script>
</head>
<body scroll="no" style="OVERFLOW: hidden">
<table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
<tr>
<td>
<table cellSpacing="0" cellPadding="0" align="center" border="0">
<tr>
<td valign="top">
<span fckLang="MacroDlgName">Macro Name</span><br>
<select id="txtName" size="10" onchange="OnChange(this.value);">
''' % request.cfg.url_prefix_static)
macros = []
for macro in macro_list(request):
if macro == "BR":
selected = ' selected="selected"'
else:
selected = ''
if macro in help:
macros.append('<option value="%s"%s>%s</option>' %
(help[macro].group('prototype'), selected, macro))
else:
macros.append('<option value="%s"%s>%s</option>' %
(macro, selected, macro))
request.write('\n'.join(macros))
request.write('''
</select>
</td>
<td id="help">''')
helptexts = []
for macro in macro_list(request):
if macro in help:
match = help[macro]
prototype = match.group('prototype')
helptext = match.group('help')
else:
prototype = macro
helptext = ""
helptexts.append(
'''<div id="%s" style="DISPLAY: none">
<b><<%s>></b>
<br/>
<textarea style="color:#000000" cols="37" rows="10" disabled="disabled">%s</textarea>
</div>'''
% (prototype, prototype, helptext))
request.write(''.join(helptexts))
request.write('''
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
''')
def macro_list(request):
from MoinMoin import macro
macros = macro.getNames(request.cfg)
macros.sort()
return macros
def get_macro_help(request):
""" Read help texts from SystemPage('HelpOnMacros')"""
helppage = wikiutil.getLocalizedPage(request, "HelpOnMacros")
content = helppage.get_raw_body()
macro_re = re.compile(
r"\|\|(<.*?>)?\{\{\{" +
r"<<(?P<prototype>(?P<macro>\w*).*)>>" +
r"\}\}\}\s*\|\|" +
r"[^|]*\|\|[^|]*\|\|<[^>]*>" +
r"\s*(?P<help>.*?)\s*\|\|\s*(?P<example>.*?)\s*(<<[^>]*>>)*\s*\|\|$", re.U|re.M)
help = {}
for match in macro_re.finditer(content):
help[match.group('macro')] = match
return help
##############################################################################
### Link dialog
##############################################################################
def page_list(request):
from MoinMoin import search
name = request.values.get("pagename", "")
if name:
searchresult = search.searchPages(request, 't:"%s"' % name)
pages = [p.page_name for p in searchresult.hits]
else:
pages = [name]
request.write(
'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>Insert Page Link</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta content="noindex,nofollow" name="robots">
</head>
<body scroll="no" style="OVERFLOW: hidden">
<table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
<tr>
<td>
<table cellSpacing="0" cellPadding="0" align="center" border="0">
<tr>
<td>
<span fckLang="PageDlgName">Page name</span><br>
<select id="txtName" size="1">
%s
</select>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
''' % "".join(["<option>%s</option>\n" % p for p in pages]))
def link_dialog(request):
# list of wiki pages
name = request.values.get("pagename", "")
if name:
from MoinMoin import search
# XXX error handling!
searchresult = search.searchPages(request, 't:"%s"' % name)
pages = [p.page_name for p in searchresult.hits]
pages.sort()
pages[0:0] = [name]
page_list = '''
<tr>
<td colspan=2>
<select id="sctPagename" size="1" onchange="OnChangePagename(this.value);">
%s
</select>
<td>
</tr>
''' % "\n".join(['<option value="%s">%s</option>' % (page, page)
for page in pages])
else:
page_list = ""
# list of interwiki names
interwiki_list = wikiutil.load_wikimap(request)
interwiki = interwiki_list.keys()
interwiki.sort()
iwpreferred = request.cfg.interwiki_preferred[:]
if not iwpreferred or iwpreferred and iwpreferred[-1] is not None:
resultlist = iwpreferred
for iw in interwiki:
if not iw in iwpreferred:
resultlist.append(iw)
else:
resultlist = iwpreferred[:-1]
interwiki = "\n".join(
['<option value="%s">%s</option>' % (key, key) for key in resultlist])
# wiki url
url_prefix_static = request.cfg.url_prefix_static
scriptname = request.script_root + '/'
action = scriptname
basepage = request.page.page_name
request.write(u'''
<!--
* FCKeditor - The text editor for internet
* Copyright (C) 2003-2004 Frederico Caldeira Knabben
*
* Licensed under the terms of the GNU Lesser General Public License:
* http://www.opensource.org/licenses/lgpl-license.php
*
* For further information visit:
* http://www.fckeditor.net/
*
* File Name: fck_link.html
* Link dialog window.
*
* Version: 2.0 FC (Preview)
* Modified: 2005-02-18 23:55:22
*
* File Authors:
* Frederico Caldeira Knabben ([email protected])
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<meta name="robots" content="index,nofollow">
<html>
<head>
<title>Link Properties</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex,nofollow" />
<script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
<script src="%(url_prefix_static)s/applets/moinFCKplugins/moinlink/fck_link.js" type="text/javascript"></script>
<script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script>
</head>
<body scroll="no" style="OVERFLOW: hidden">
<div id="divInfo" style="DISPLAY: none">
<span fckLang="DlgLnkType">Link Type</span><br />
<select id="cmbLinkType" onchange="SetLinkType(this.value);">
<option value="wiki" selected="selected">WikiPage</option>
<option value="interwiki">Interwiki</option>
<option value="url" fckLang="DlgLnkTypeURL">URL</option>
</select>
<br />
<br />
<div id="divLinkTypeWiki">
<table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
<tr>
<td>
<form action=%(action)s method="GET">
<input type="hidden" name="action" value="fckdialog">
<input type="hidden" name="dialog" value="link">
<input type="hidden" id="basepage" name="basepage" value="%(basepage)s">
<table cellSpacing="0" cellPadding="0" align="center" border="0">
<tr>
<td>
<span fckLang="PageDlgName">Page Name</span><br>
<input id="txtPagename" name="pagename" size="30" value="%(name)s">
</td>
<td valign="bottom">
<input id=btnSearchpage type="submit" value="Search">
</td>
</tr>
%(page_list)s
</table>
</form>
</td>
</tr>
</table>
</div>
<div id="divLinkTypeInterwiki">
<table height="100%%" cellSpacing="0" cellPadding="0" width="100%%" border="0">
<tr>
<td>
<table cellSpacing="0" cellPadding="0" align="center" border="0">
<tr>
<td>
<span fckLang="WikiDlgName">Wiki:PageName</span><br>
<select id="sctInterwiki" size="1">
%(interwiki)s
</select>:
<input id="txtInterwikipagename"></input>
</td>
</tr>
</table>
</td>
</tr>
</table>
</div>
<div id="divLinkTypeUrl">
<table cellspacing="0" cellpadding="0" width="100%%" border="0">
<tr>
<td nowrap="nowrap">
<span fckLang="DlgLnkProto">Protocol</span><br />
<select id="cmbLinkProtocol">
<option value="http://" selected="selected">http://</option>
<option value="https://">https://</option>
<option value="ftp://">ftp://</option>
<option value="file://">file://</option>
<option value="news://">news://</option>
<option value="mailto:">mailto:</option>
<option value="" fckLang="DlgLnkProtoOther"><other></option>
</select>
</td>
<td nowrap="nowrap"> </td>
<td nowrap="nowrap" width="100%%">
<span fckLang="DlgLnkURL">URL</span><br />
<input id="txtUrl" style="WIDTH: 100%%" type="text" onkeyup="OnUrlChange();" onchange="OnUrlChange();" />
</td>
</tr>
</table>
<br />
</div>
</div>
</body>
</html>
''' % locals())
def attachment_dialog(request):
""" Attachment dialog for GUI editor. """
""" Features: This dialog can... """
""" - list attachments in a drop down list """
""" - list attachments also for a different page than the current one """
""" - create new attachment """
_ = request.getText
url_prefix_static = request.cfg.url_prefix_static
# wiki url
action = request.script_root + "/"
# The following code lines implement the feature "list attachments for a different page".
# Meaning of the variables:
# - requestedPagename : Name of the page where attachments shall be listed from.
# - attachmentsPagename : Name of the page where the attachments where retrieved from.
# - destinationPagename : Name of the page where attachment will be placed on.
requestedPagename = wikiutil.escape(request.values.get("requestedPagename", ""), quote=True)
destinationPagename = wikiutil.escape(request.values.get("destinationPagename", request.page.page_name), quote=True)
attachmentsPagename = requestedPagename or request.page.page_name
attachments = _get_files(request, attachmentsPagename)
attachments.sort()
attachmentList = '''
<select id="sctAttachments" size="10" style="width:100%%;visibility:hidden;" onchange="OnAttachmentListChange();">
%s
</select>
''' % "\n".join(['<option value="%s">%s</option>' % (wikiutil.escape(attachment, quote=True), wikiutil.escape(attachment, quote=True))
for attachment in attachments])
# Translation of dialog texts.
langAttachmentLocation = _("Attachment location")
langPagename = _("Page name")
langAttachmentname = _("Attachment name")
langListAttachmentsButton = _("Refresh attachment list")
langAttachmentList = _("List of attachments")
if len(attachmentsPagename) > 50:
shortenedPagename = "%s ... %s" % (attachmentsPagename[0:25], attachmentsPagename[-25:])
else:
shortenedPagename = attachmentsPagename
langAvailableAttachments = "%s: %s" % (_("Available attachments for page"), shortenedPagename)
request.write('''
<!--
* FCKeditor - The text editor for internet
* Copyright (C) 2003-2004 Frederico Caldeira Knabben
*
* Licensed under the terms of the GNU Lesser General Public License:
* http://www.opensource.org/licenses/lgpl-license.php
*
* For further information visit:
* http://www.fckeditor.net/
*
* File Name: fck_attachment.html
* Attachment dialog window.
*
* Version: 2.0 FC (Preview)
* Modified: 2005-02-18 23:55:22
*
* File Authors:
* Frederico Caldeira Knabben ([email protected])
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<meta name="robots" content="index,nofollow">
<html>
<head>
<title>Attachment Properties</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex,nofollow" />
<script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
<script src="%(url_prefix_static)s/applets/moinFCKplugins/moinattachment/fck_attachment.js" type="text/javascript"></script>
<script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script>
</head>
<body scroll="no" style="OVERFLOW: hidden">
<form id="DlgAttachmentForm" name="DlgAttachmentForm" action=%(action)s method="GET">
<input type="hidden" name="action" value="fckdialog">
<input type="hidden" name="dialog" value="attachment">
<input type="hidden" id="requestedPagename" name="requestedPagename" value="%(requestedPagename)s">
<input type="hidden" id="attachmentsPagename" name="attachmentsPagename" value="%(attachmentsPagename)s">
<input type="hidden" id="destinationPagename" name="destinationPagename" value="%(destinationPagename)s">
<div id="divInfo" style="valign=top;">
<div id="divLinkTypeAttachment">
<fieldset>
<legend>%(langAttachmentLocation)s</legend>
<table cellSpacing="0" cellPadding="0" width="100%%" border="0">
<tr>
<td valign="bottom" style="width:90%%" style="padding-bottom:10px">
<span>%(langPagename)s</span><br>
</td>
</tr>
<tr>
<td valign="bottom" style="width:100%%" style="padding-bottom:10px;padding-right:10px;">
<input id="txtPagename" type="text" onkeyup="OnPagenameChange();" onchange="OnPagenameChange();" style="width:98%%">
</td>
</tr>
<tr>
<td valign="bottom" style="width:90%%" style="padding-bottom:10px;">
<span>%(langAttachmentname)s</span><br>
</td>
</tr>
<tr valign="bottom">
<td valign="bottom" style="width:100%%" style="padding-bottom:10px;padding-right:10px;">
<input id="txtAttachmentname" type="text" onkeyup="OnAttachmentnameChange();" onchange="OnPagenameChange();" style="width:98%%"><br>
</td>
</tr>
</table>
</fieldset>
<fieldset>
<legend>%(langAvailableAttachments)s</legend>
<table cellSpacing="0" cellPadding="0" width="100%%" border="0">
<tr>
<td valign="bottom" style="width:100%%" style="padding-bottom:10px">
<input id="btnListAttachments" type="submit" value="%(langListAttachmentsButton)s">
</td>
</tr>
<tr>
<td valign="top" style="padding-top:10px">
<label for="sctAttachments">%(langAttachmentList)s</label><br>
%(attachmentList)s
</td>
</tr>
</table>
</fieldset>
</div>
</div>
</form>
</body>
</html>
''' % locals())
##############################################################################
### Image dialog
##############################################################################
def image_dialog(request):
url_prefix_static = request.cfg.url_prefix_static
request.write('''
<!--
* FCKeditor - The text editor for internet
* Copyright (C) 2003-2004 Frederico Caldeira Knabben
*
* Licensed under the terms of the GNU Lesser General Public License:
* http://www.opensource.org/licenses/lgpl-license.php
*
* For further information visit:
* http://www.fckeditor.net/
*
* File Authors:
* Frederico Caldeira Knabben ([email protected])
* Florian Festi
-->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>Link Properties</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex,nofollow" />
<script src="%(url_prefix_static)s/applets/FCKeditor/editor/dialog/common/fck_dialog_common.js" type="text/javascript"></script>
<script src="%(url_prefix_static)s/applets/moinFCKplugins/moinimage/fck_image.js" type="text/javascript"></script>
<script src="%(url_prefix_static)s/applets/moinFCKplugins/moinurllib.js" type="text/javascript"></script>
</head>
<body scroll="no" style="OVERFLOW: hidden">
<table cellspacing="0" cellpadding="0" width="100%%" border="0">
<tr>
<td nowrap="nowrap">
<span fckLang="DlgLnkProto">Protocol</span><br />
<select id="cmbLinkProtocol" onchange="OnProtocolChange();">
<option value="attachment:" selected="selected">attachment:</option>
<option value="http://">http://</option>
<option value="https://">https://</option>
<!-- crashes often: <option value="drawing:">drawing:</option> -->
<option value="" fckLang="DlgLnkProtoOther"><other></option>
</select>
</td>
<td nowrap="nowrap"> </td>
<td nowrap="nowrap" width="100%%">
<span fckLang="DlgLnkURL">URL or File Name (attachment:)</span><br />
<input id="txtUrl" style="WIDTH: 100%%" type="text" onkeyup="OnUrlChange();" onchange="OnUrlChange();" />
</td>
</tr>
<tr>
<td colspan=2>
<div id="divChkLink">
<input id="chkLink" type="checkbox"> Link to
</div>
</td>
</table>
</body>
</html>
''' % locals())
#############################################################################
### Main
#############################################################################
def execute(pagename, request):
dialog = request.values.get("dialog", "")
if dialog == "macro":
macro_dialog(request)
elif dialog == "macrolist":
macro_list(request)
elif dialog == "pagelist":
page_list(request)
elif dialog == "link":
link_dialog(request)
elif dialog == "attachment":
attachment_dialog(request)
elif dialog == 'image':
image_dialog(request)
else:
from MoinMoin.Page import Page
request.theme.add_msg("Dialog unknown!", "error")
Page(request, pagename).send_page()
|
Glottotopia/aagd
|
moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/action/fckdialog.py
|
Python
|
mit
| 20,690
|
[
"VisIt"
] |
313fbed966be153e5e59a7d66906c5f9b30541f6a8df2df95d97c00168526957
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 18, 2012"
import unittest
import os
import warnings
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_get_data(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
data = self.queen.get_data()
self.assertEqual(len(data), 11)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/apps/borg/tests/test_queen.py
|
Python
|
mit
| 1,250
|
[
"pymatgen"
] |
4c5f26d4499235df56cdb490ba9c2c6f77493d92598b5d8c6cf3b2ee8901cd48
|
"""
=====================================
SGDOneClassSVM benchmark
=====================================
This benchmark compares the :class:`SGDOneClassSVM` with :class:`OneClassSVM`.
The former is an online One-Class SVM implemented with a Stochastic Gradient
Descent (SGD). The latter is based on the LibSVM implementation. The
complexity of :class:`SGDOneClassSVM` is linear in the number of samples
whereas the one of :class:`OneClassSVM` is at best quadratic in the number of
samples. We here compare the performance in terms of AUC and training time on
classical anomaly detection datasets.
The :class:`OneClassSVM` is applied with a Gaussian kernel and we therefore
use a kernel approximation prior to the application of :class:`SGDOneClassSVM`.
"""
from time import time
import numpy as np
from scipy.interpolate import interp1d
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.utils import shuffle
from sklearn.kernel_approximation import Nystroem
from sklearn.svm import OneClassSVM
from sklearn.linear_model import SGDOneClassSVM
import matplotlib.pyplot as plt
import matplotlib
font = {'weight': 'normal',
'size': 15}
matplotlib.rc('font', **font)
print(__doc__)
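# Illustrative sketch (not part of the original benchmark): the two
# estimators compared below, fitted on a small synthetic Gaussian blob with
# the same Nystroem kernel approximation used for the online model. The
# sample size, nu and seed are arbitrary example values; the full benchmark
# below measures ROC AUC and timing on real datasets.
def _toy_comparison(n_samples=500, toy_nu=0.05, random_state=42):
    rng = np.random.RandomState(random_state)
    X = rng.randn(n_samples, 2)
    libsvm_model = OneClassSVM(kernel='rbf', gamma=1 / X.shape[1],
                               nu=toy_nu).fit(X)
    online_model = make_pipeline(
        StandardScaler(),
        Nystroem(gamma=1 / X.shape[1], random_state=random_state),
        SGDOneClassSVM(nu=toy_nu, random_state=random_state)).fit(X)
    return (libsvm_model.decision_function(X),
            online_model.decision_function(X))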
def print_outlier_ratio(y):
"""
    Helper function to show the distinct value counts of the elements in the target.
Useful indicator for the datasets used in bench_isolation_forest.py.
"""
uniq, cnt = np.unique(y, return_counts=True)
print("----- Target count values: ")
for u, c in zip(uniq, cnt):
print("------ %s -> %d occurrences" % (str(u), c))
print("----- Outlier ratio: %.5f" % (np.min(cnt) / len(y)))
# for roc curve computation
n_axis = 1000
x_axis = np.linspace(0, 1, n_axis)
datasets = ['http', 'smtp', 'SA', 'SF', 'forestcover']
novelty_detection = False # if False, training set polluted by outliers
random_states = [42]
nu = 0.05
results_libsvm = np.empty((len(datasets), n_axis + 5))
results_online = np.empty((len(datasets), n_axis + 5))
for dat, dataset_name in enumerate(datasets):
print(dataset_name)
# Loading datasets
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, shuffle=False,
percent10=False, random_state=88)
X = dataset.data
y = dataset.target
if dataset_name == 'forestcover':
dataset = fetch_covtype(shuffle=False)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
# Vectorizing data
if dataset_name == 'SF':
# Casting type of X (object) as string is needed for string categorical
# features to apply LabelBinarizer
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
# Casting type of X (object) as string is needed for string categorical
# features to apply LabelBinarizer
x1 = lb.fit_transform(X[:, 1].astype(str))
x2 = lb.fit_transform(X[:, 2].astype(str))
x3 = lb.fit_transform(X[:, 3].astype(str))
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != b'normal.').astype(int)
if dataset_name in ['http', 'smtp']:
y = (y != b'normal.').astype(int)
print_outlier_ratio(y)
n_samples, n_features = np.shape(X)
if dataset_name == 'SA': # LibSVM too long with n_samples // 2
n_samples_train = n_samples // 20
else:
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
print('n_train: ', n_samples_train)
print('n_features: ', n_features)
tpr_libsvm = np.zeros(n_axis)
tpr_online = np.zeros(n_axis)
fit_time_libsvm = 0
fit_time_online = 0
predict_time_libsvm = 0
predict_time_online = 0
X = X.astype(float)
gamma = 1 / n_features # OCSVM default parameter
for random_state in random_states:
print('random state: %s' % random_state)
X, y = shuffle(X, y, random_state=random_state)
X_train = X[:n_samples_train]
X_test = X[n_samples_train:]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
if novelty_detection:
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
std = StandardScaler()
print('----------- LibSVM OCSVM ------------')
ocsvm = OneClassSVM(kernel='rbf', gamma=gamma, nu=nu)
pipe_libsvm = make_pipeline(std, ocsvm)
tstart = time()
pipe_libsvm.fit(X_train)
fit_time_libsvm += time() - tstart
tstart = time()
# scoring such that the lower, the more normal
scoring = -pipe_libsvm.decision_function(X_test)
predict_time_libsvm += time() - tstart
fpr_libsvm_, tpr_libsvm_, _ = roc_curve(y_test, scoring)
f_libsvm = interp1d(fpr_libsvm_, tpr_libsvm_)
tpr_libsvm += f_libsvm(x_axis)
print('----------- Online OCSVM ------------')
nystroem = Nystroem(gamma=gamma, random_state=random_state)
online_ocsvm = SGDOneClassSVM(nu=nu, random_state=random_state)
pipe_online = make_pipeline(std, nystroem, online_ocsvm)
tstart = time()
pipe_online.fit(X_train)
fit_time_online += time() - tstart
tstart = time()
# scoring such that the lower, the more normal
scoring = -pipe_online.decision_function(X_test)
predict_time_online += time() - tstart
fpr_online_, tpr_online_, _ = roc_curve(y_test, scoring)
f_online = interp1d(fpr_online_, tpr_online_)
tpr_online += f_online(x_axis)
tpr_libsvm /= len(random_states)
tpr_libsvm[0] = 0.
fit_time_libsvm /= len(random_states)
predict_time_libsvm /= len(random_states)
auc_libsvm = auc(x_axis, tpr_libsvm)
results_libsvm[dat] = ([fit_time_libsvm, predict_time_libsvm,
auc_libsvm, n_samples_train,
n_features] + list(tpr_libsvm))
tpr_online /= len(random_states)
tpr_online[0] = 0.
fit_time_online /= len(random_states)
predict_time_online /= len(random_states)
auc_online = auc(x_axis, tpr_online)
results_online[dat] = ([fit_time_online, predict_time_online,
auc_online, n_samples_train,
                            n_features] + list(tpr_online))
# -------- Plotting bar charts -------------
fit_time_libsvm_all = results_libsvm[:, 0]
predict_time_libsvm_all = results_libsvm[:, 1]
auc_libsvm_all = results_libsvm[:, 2]
n_train_all = results_libsvm[:, 3]
n_features_all = results_libsvm[:, 4]
fit_time_online_all = results_online[:, 0]
predict_time_online_all = results_online[:, 1]
auc_online_all = results_online[:, 2]
width = 0.7
ind = 2 * np.arange(len(datasets))
x_tickslabels = [(name + '\n' + r'$n={:,d}$' + '\n' + r'$d={:d}$')
.format(int(n), int(d))
for name, n, d in zip(datasets, n_train_all, n_features_all)]
def autolabel_auc(rects, ax):
"""Attach a text label above each bar displaying its height."""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%.3f' % height, ha='center', va='bottom')
def autolabel_time(rects, ax):
"""Attach a text label above each bar displaying its height."""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%.1f' % height, ha='center', va='bottom')
fig, ax = plt.subplots(figsize=(15, 8))
ax.set_ylabel('AUC')
ax.set_ylim((0, 1.3))
rect_libsvm = ax.bar(ind, auc_libsvm_all, width=width, color='r')
rect_online = ax.bar(ind + width, auc_online_all, width=width, color='y')
ax.legend((rect_libsvm[0], rect_online[0]), ('LibSVM', 'Online SVM'))
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_tickslabels)
autolabel_auc(rect_libsvm, ax)
autolabel_auc(rect_online, ax)
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
ax.set_ylabel('Training time (sec) - Log scale')
ax.set_yscale('log')
rect_libsvm = ax.bar(ind, fit_time_libsvm_all, color='r', width=width)
rect_online = ax.bar(ind + width, fit_time_online_all, color='y', width=width)
ax.legend((rect_libsvm[0], rect_online[0]), ('LibSVM', 'Online SVM'))
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_tickslabels)
autolabel_time(rect_libsvm, ax)
autolabel_time(rect_online, ax)
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
ax.set_ylabel('Testing time (sec) - Log scale')
ax.set_yscale('log')
rect_libsvm = ax.bar(ind, predict_time_libsvm_all, color='r', width=width)
rect_online = ax.bar(ind + width, predict_time_online_all,
color='y', width=width)
ax.legend((rect_libsvm[0], rect_online[0]), ('LibSVM', 'Online SVM'))
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_tickslabels)
autolabel_time(rect_libsvm, ax)
autolabel_time(rect_online, ax)
plt.show()
|
glemaitre/scikit-learn
|
benchmarks/bench_online_ocsvm.py
|
Python
|
bsd-3-clause
| 9,395
|
[
"Gaussian"
] |
d1bef8a8a654def98d6574ea14a0b84ae402e1b5e4a11438d6f43459a5bb7203
|
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.views.generic.edit import DeleteView, UpdateView, CreateView
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from .models import Client
from visit.models import Visit
from scheduler.views import LoginRequiredMixin
from .forms import ClientForm
from report.reports import monthDetail
################################################################################
class ClientList(LoginRequiredMixin, generic.ListView):
model = Client
################################################################################
class ClientDetail(LoginRequiredMixin, generic.DetailView):
model = Client
def get_context_data(self, *args, **kwargs):
context = super(ClientDetail, self).get_context_data(*args, **kwargs)
client = kwargs['object']
d = monthDetail()
context['visits'] = Visit.objects.filter(client=client)
context['year'] = d['year']
context['month'] = d['month']
context['month_days'] = d['month_days']
context['mname'] = d['mname']
context['client'] = client
return context
################################################################################
class ClientUpdate(LoginRequiredMixin, UpdateView):
model = Client
form_class = ClientForm
def get_success_url(self):
return reverse_lazy('clientDetail', kwargs={'pk': self.object.id})
################################################################################
class ClientDelete(LoginRequiredMixin, DeleteView):
model = Client
success_url = reverse_lazy('clientList')
################################################################################
class ClientNew(LoginRequiredMixin, CreateView):
model = Client
form_class = ClientForm
def get_success_url(self):
return reverse_lazy('clientDetail', kwargs={'pk': self.object.id})
################################################################################
@login_required
def clientDeleteVisits(request, pk):
""" Delete all the visits for a client """
c = Client.objects.get(pk=pk)
for v in Visit.objects.filter(client=c):
v.delete()
return redirect("clientDetail", pk=pk)
################################################################################
@login_required
def clientGenerateVisits(request, pk):
c = Client.objects.get(pk=pk)
msgs = c.makeVisits()
for msg in msgs:
messages.info(request, msg)
return redirect("clientDetail", pk=pk)
################################################################################
@login_required
def displayDay(request, year=None, month=None, day=None):
return redirect("displayThisMonth")
################################################################################
@login_required
def clientIndex(request):
return render(request, 'client/client_index.html')
################################################################################
@login_required
def deleteAllClients(request):
for c in Client.objects.all():
for v in Visit.objects.filter(client=c):
v.delete()
c.delete()
return redirect("index")
# EOF
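A minimal sketch of how the views above might be wired into a URLconf. Only the pattern names ('clientDetail', 'clientList', 'index', etc.) are implied by the reverse_lazy() and redirect() calls in views.py; the URL paths themselves are assumptions and may differ from the actual Scheduler project.
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.clientIndex, name='clientIndex'),
    url(r'^list/$', views.ClientList.as_view(), name='clientList'),
    url(r'^new/$', views.ClientNew.as_view(), name='clientNew'),
    url(r'^(?P<pk>\d+)/$', views.ClientDetail.as_view(), name='clientDetail'),
    url(r'^(?P<pk>\d+)/edit/$', views.ClientUpdate.as_view(), name='clientUpdate'),
    url(r'^(?P<pk>\d+)/delete/$', views.ClientDelete.as_view(), name='clientDelete'),
    # Function-based helpers defined at the bottom of views.py
    url(r'^(?P<pk>\d+)/visits/delete/$', views.clientDeleteVisits, name='clientDeleteVisits'),
    url(r'^(?P<pk>\d+)/visits/generate/$', views.clientGenerateVisits, name='clientGenerateVisits'),
]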
| dwagon/Scheduler | scheduler/client/views.py | Python | gpl-2.0 | 3,326 | ["VisIt"] | de2920f7d3fdf59a82bb61e98f2d04a456d06e4272f47ff8918d7c26589c73f9 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011-2012 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <[email protected]>
##
##
"""Stoqlib API
Singleton object which makes it easier to use common stoqlib APIs without
having to import their symbols.
"""
import operator
import sys
import glib
from kiwi.component import get_utility
from twisted.internet.defer import inlineCallbacks, returnValue
from stoqlib.database.runtime import (new_store,
get_default_store)
from stoqlib.database.runtime import (get_current_branch,
get_current_station, get_current_user)
from stoqlib.database.settings import db_settings
from stoqlib.domain.person import Branch
from stoqlib.gui.events import CanSeeAllBranches
from stoqlib.lib.environment import is_developer_mode
from stoqlib.lib.interfaces import IStoqConfig
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.settings import get_settings
from stoqlib.lib.translation import locale_sorted, stoqlib_gettext as _
from stoqlib.l10n.l10n import get_l10n_field
class StoqAPI(object):
def get_default_store(self):
return get_default_store()
def new_store(self):
return new_store()
def get_current_branch(self, store):
return get_current_branch(store)
def get_current_station(self, store):
return get_current_station(store)
def get_current_user(self, store):
return get_current_user(store)
@property
def config(self):
return get_utility(IStoqConfig)
@property
def db_settings(self):
return db_settings
@property
def user_settings(self):
return get_settings()
def is_developer_mode(self):
return is_developer_mode()
@property
def async(self):
"""Async API for dialog, it's built on-top of
twisted.It is meant to be used in the following way::
@api.async
def _run_a_dialog(self):
model = yield run_dialog(SomeDialog, parent, store)
If the function returns a value, you need to use
:py:func:`~stoqlib.api.StoqAPI.asyncReturn`, eg::
api.asyncReturn(model)
:returns: a generator
"""
return inlineCallbacks
def asyncReturn(self, value=None):
"""An async API that also returns a value,
see :py:func:`~stoqlib.api.StoqAPI.async` for more information.
:param value: the return value, defaults to None
:returns: a twisted deferred
"""
return returnValue(value)
def get_l10n_field(self, field_name, country=None):
return get_l10n_field(field_name, country=country)
def for_combo(self, resultset, attr=None, empty=None, sorted=True):
"""
Prepares the result of a table for inserting into a combo.
Formats the item and sorts them according to the current locale
:param resultset: a resultset
:param attr: attribute to use instead of :py:class:`~stoqlib.domain.interfaces.IDescribable`
:param empty: if set, add an initial None item with this parameter as
a label
Example::
categories = self.store.find(SellableCategory)
self.category_combo.prefill(api.for_combo(categories,
attr='full_description'))
"""
if attr is not None:
items = [(getattr(obj, attr), obj) for obj in resultset]
else:
# If attr is not specified, the objects in the resultset must
# implement IDescribable
items = [(obj.get_description(), obj) for obj in resultset]
if sorted:
items = locale_sorted(items, key=operator.itemgetter(0))
if empty is not None:
items.insert(0, (empty, None))
return items
def for_person_combo(self, resultset):
"""
This is similar to :py:func:`~stoqlib.api.StoqAPI.for_combo` but
takes a class that references a :py:class:`~stoqlib.domain.person.Person`,
such as a :py:class:`~stoqlib.domain.person.Client`,
:py:class:`~stoqlib.domain.person.Company`,
:py:class:`~stoqlib.domain.person.Supplier` etc.
:param resultset: a resultset
Example::
suppliers = Supplier.get_active_suppliers(self.store)
self.supplier.prefill(api.for_person_combo(suppliers))
"""
from stoqlib.domain.person import Person
from storm import Undef
from storm.expr import Eq
store = resultset._store
facet = resultset._find_spec.default_cls
where = resultset._where
# This is fetching all persons to cache the objects and avoid extra
# queries when constructing the combo strings.
resultset = store.find((Person, facet), Person.id == facet.person_id,
Eq(Person.merged_with_id, None))
if where is not Undef:
resultset = resultset.find(where)
items = [(obj[1].get_description(), obj[1]) for obj in resultset]
# FIXME: A combo only changes to data mode (the one that it
        # returns an object instead of the label) when prefilled with
# objects. Prefilling with this fake data will prevent the problem
# from happening. We should fix this on kiwi later
if not items:
return [('', None)]
return locale_sorted(items, key=operator.itemgetter(0))
def can_see_all_branches(self):
can_see = CanSeeAllBranches.emit()
if can_see is not None:
return can_see
return not api.sysparam.get_bool('SYNCHRONIZED_MODE')
def get_branches_for_filter(self, store, use_id=False):
"""Returns a list of branches to be used in a combo.
:param use_id: If True, we will return the options using the object id
instead of the real object.
"""
if not api.can_see_all_branches():
current = self.get_current_branch(store)
if use_id:
value = current.id
else:
value = current
items = [(current.get_description(), value)]
else:
branches = Branch.get_active_branches(store)
if use_id:
items = [(b.get_description(), b.id) for b in branches]
else:
items = [(b.get_description(), b) for b in branches]
items.insert(0, (_("Any"), None))
return items
def escape(self, string):
"""Escapes the text and makes it suitable for use with a
PangoMarkup, usually via Label.set_markup()"""
if string is None:
string = ''
return unicode(glib.markup_escape_text(string))
def prepare_test(self):
"""Prepares to run a standalone test.
This initializes Stoq and creates a store and returns
an example creator.
:returns: an :py:class:`~stoqlib.domain.exampledata.ExampleCreator`
"""
# FIXME: We need to move this into stoqlib
from stoq.gui.shell.bootstrap import boot_shell
from stoq.lib.options import get_option_parser
parser = get_option_parser()
options = parser.parse_args(sys.argv[1:])[0]
options.wizard = False
options.splashscreen = False
options.autoreload = False
options.login_username = u'admin'
options.non_fatal_warnings = False
shell = boot_shell(options, initial=False)
shell._dbconn.connect()
shell._do_login()
from stoqlib.domain.exampledata import ExampleCreator
ec = ExampleCreator()
store = self.new_store()
ec.set_store(store)
return ec
api = StoqAPI()
api.sysparam = sysparam
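A minimal usage sketch of the store helpers exposed by StoqAPI above. The commit/close lifecycle shown here is assumed from common Storm-style store APIs; the exact protocol in stoqlib may differ.
from stoqlib.api import api

# Open a new store, do some work against it, then commit and release it.
store = api.new_store()
try:
    branch = api.get_current_branch(store)
    user = api.get_current_user(store)
    # ... create or modify domain objects attached to `store` here ...
    store.commit()   # assumed Storm-style commit
finally:
    store.close()    # assumed Storm-style close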
| andrebellafronte/stoq | stoqlib/api.py | Python | gpl-2.0 | 8,524 | ["VisIt"] | 6058eeafbb77711e3c9d2342b4cd2b6138135fe881a8a68a15d3d3a97e692bde |
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import collections
import logging
import threading
import itertools
import time
__author__ = 'Brian Quinlan ([email protected])'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._traceback = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
# raise type(self._exception), self._exception, self._traceback
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception_info(self, timeout=None):
"""Return a tuple of (exception, traceback) raised by the call that the
future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
return self.exception_info(timeout)[0]
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (though calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception_info(self, exception, traceback):
"""Sets the result of the future as being the given exception
and traceback.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._traceback = traceback
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
self.set_exception_info(exception, None)
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
        It is safe to call this method several times. Once it has been
        called, no other methods may be called on this executor.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
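A minimal usage sketch of the Future, wait() and as_completed() machinery defined above, driven by the thread-based executor that normally ships alongside this module. The `concurrent.futures` import path is an assumption; ThreadPoolExecutor is not defined in this file.
from concurrent.futures import ThreadPoolExecutor, as_completed

def square(x):
    return x * x

# Submit a batch of calls and consume results as each future finishes.
with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(square, i) for i in range(10)]
    for future in as_completed(futures, timeout=30):
        print(future.result())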
| bbfamily/abu | abupy/ExtBu/futures/_base.py | Python | gpl-3.0 | 21,108 | ["Brian"] | 8c96eee148e5ad8f210f0af733005998074d2ad43af296f718be1df0b55d590f |