from . import _ccallback_c

import ctypes

PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]

ffi = None


class CData(object):
    pass


def _import_cffi():
    global ffi, CData

    if ffi is not None:
        return

    try:
        import cffi
        ffi = cffi.FFI()
        CData = ffi.CData
    except ImportError:
        ffi = False


class LowLevelCallable(tuple):
    """
    Low-level callback function.

    Parameters
    ----------
    function : {PyCapsule, ctypes function pointer, cffi function pointer}
        Low-level callback function.
    user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
        User data to pass on to the callback function.
    signature : str, optional
        Signature of the function. If omitted, determined from *function*,
        if possible.

    Attributes
    ----------
    function
        Callback function given.
    user_data
        User data given.
    signature
        Signature of the function.

    Methods
    -------
    from_cython
        Class method for constructing callables from Cython C-exported
        functions.

    Notes
    -----
    The argument ``function`` can be one of:

    - PyCapsule, whose name contains the C function signature
    - ctypes function pointer
    - cffi function pointer

    The signature of the low-level callback must match one of those expected
    by the routine it is passed to.

    If constructing low-level functions from a PyCapsule, the name of the
    capsule must be the corresponding signature, in the format::

        return_type (arg1_type, arg2_type, ...)

    For example::

        "void (double)"
        "double (double, int *, void *)"

    The context of a PyCapsule passed in as ``function`` is used as
    ``user_data``, if an explicit value for ``user_data`` was not given.
    """

    # Make the class immutable
    __slots__ = ()

    def __new__(cls, function, user_data=None, signature=None):
        # We need to hold a reference to the function & user data,
        # to prevent them going out of scope
        item = cls._parse_callback(function, user_data, signature)
        return tuple.__new__(cls, (item, function, user_data))

    def __repr__(self):
        return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data)

    @property
    def function(self):
        return tuple.__getitem__(self, 1)

    @property
    def user_data(self):
        return tuple.__getitem__(self, 2)

    @property
    def signature(self):
        return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))

    def __getitem__(self, idx):
        raise ValueError()

    @classmethod
    def from_cython(cls, module, name, user_data=None, signature=None):
        """
        Create a low-level callback function from an exported Cython function.

        Parameters
        ----------
        module : module
            Cython module where the exported function resides
        name : str
            Name of the exported function
        user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
            User data to pass on to the callback function.
        signature : str, optional
            Signature of the function. If omitted, determined from *function*.

        """
        try:
            function = module.__pyx_capi__[name]
        except AttributeError:
            raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute")
        except KeyError:
            raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name))

        return cls(function, user_data, signature)

    @classmethod
    def _parse_callback(cls, obj, user_data=None, signature=None):
        _import_cffi()

        if isinstance(obj, LowLevelCallable):
            func = tuple.__getitem__(obj, 0)
        elif isinstance(obj, PyCFuncPtr):
            func, signature = _get_ctypes_func(obj, signature)
        elif isinstance(obj, CData):
            func, signature = _get_cffi_func(obj, signature)
        elif _ccallback_c.check_capsule(obj):
            func = obj
        else:
            raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)")

        if isinstance(user_data, ctypes.c_void_p):
            context = _get_ctypes_data(user_data)
        elif isinstance(user_data, CData):
            context = _get_cffi_data(user_data)
        elif user_data is None:
            context = 0
        elif _ccallback_c.check_capsule(user_data):
            context = user_data
        else:
            raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)")

        return _ccallback_c.get_raw_capsule(func, signature, context)


#
# ctypes helpers
#

def _get_ctypes_func(func, signature=None):
    # Get function pointer
    func_ptr = ctypes.cast(func, ctypes.c_void_p).value

    # Construct function signature
    if signature is None:
        signature = _typename_from_ctypes(func.restype) + " ("
        for j, arg in enumerate(func.argtypes):
            if j == 0:
                signature += _typename_from_ctypes(arg)
            else:
                signature += ", " + _typename_from_ctypes(arg)
        signature += ")"

    return func_ptr, signature


def _typename_from_ctypes(item):
    if item is None:
        return "void"
    elif item is ctypes.c_void_p:
        return "void *"

    name = item.__name__

    pointer_level = 0
    while name.startswith("LP_"):
        pointer_level += 1
        name = name[3:]

    if name.startswith('c_'):
        name = name[2:]

    if pointer_level > 0:
        name += " " + "*" * pointer_level

    return name


def _get_ctypes_data(data):
    # Get void* pointer
    return ctypes.cast(data, ctypes.c_void_p).value


#
# CFFI helpers
#

def _get_cffi_func(func, signature=None):
    # Get function pointer
    func_ptr = ffi.cast('uintptr_t', func)

    # Get signature
    if signature is None:
        signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')

    return func_ptr, signature


def _get_cffi_data(data):
    # Get pointer
    return ffi.cast('uintptr_t', data)
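# Hedged usage sketch (not part of the module above): constructing a
# LowLevelCallable from a ctypes function pointer. Assumes the C math
# library can be located via ctypes.util.find_library('m'); the signature
# "double (double)" is then derived automatically from restype/argtypes by
# _get_ctypes_func().
import ctypes
import ctypes.util

from scipy import LowLevelCallable

libm = ctypes.CDLL(ctypes.util.find_library('m'))
libm.sin.restype = ctypes.c_double
libm.sin.argtypes = (ctypes.c_double,)

callback = LowLevelCallable(libm.sin)
print(callback.signature)  # -> "double (double)"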
#!/usr/bin/env python
# coding: utf-8

"""
Copyright 2015 SmartBear Software

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""


class JsonErrorResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        Swagger model

        :param dict swagger_types: The key is attribute name and the value is
            attribute type.
        :param dict attribute_map: The key is attribute name and the value is
            json key in definition.
        """
        self.swagger_types = {
            'status': 'str',
            'message': 'str'
        }

        self.attribute_map = {
            'status': 'status',
            'message': 'message'
        }

        # Status: "ok" or "error"
        self.status = None  # str
        # Error message
        self.message = None  # str

    def __repr__(self):
        properties = []
        for p in self.__dict__:
            # Skip the type/mapping metadata; only show real attributes.
            if p not in ('swagger_types', 'attribute_map'):
                properties.append('{prop}={val!r}'.format(prop=p, val=self.__dict__[p]))

        return '<{name} {props}>'.format(name=self.__class__.__name__, props=' '.join(properties))
# -*- coding: utf-8 -*-

appid = 'example'
apikey = 'c5dd7e7dkjp27377l903c42c032b413b'

sender = '01000000000'  # FIXME - MUST BE CHANGED TO A REAL PHONE NUMBER
receivers = ['01000000000', ]  # FIXME - MUST BE CHANGED TO REAL PHONE NUMBERS

# Korean test sentence: "I can eat glass. It doesn't hurt me."
content = u'나는 유리를 먹을 수 있어요. 그래도 아프지 않아요'
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that is configured with CMake, implemented as an easyblock

@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os

from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.environment import setvar
from easybuild.tools.run import run_cmd


class CMakeMake(ConfigureMake):
    """Support for configuring build with CMake instead of traditional configure script"""

    @staticmethod
    def extra_options(extra_vars=None):
        """Define extra easyconfig parameters specific to CMakeMake."""
        extra_vars = ConfigureMake.extra_options(extra_vars)
        extra_vars.update({
            'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
            'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
        })
        return extra_vars

    def configure_step(self, srcdir=None, builddir=None):
        """Configure build using cmake"""

        if builddir is not None:
            self.log.nosupport("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")

        # Set the search paths for CMake
        include_paths = os.pathsep.join(self.toolchain.get_variable("CPPFLAGS", list))
        library_paths = os.pathsep.join(self.toolchain.get_variable("LDFLAGS", list))
        setvar("CMAKE_INCLUDE_PATH", include_paths)
        setvar("CMAKE_LIBRARY_PATH", library_paths)

        default_srcdir = '.'
        if self.cfg.get('separate_build_dir', False):
            objdir = os.path.join(self.builddir, 'easybuild_obj')
            try:
                os.mkdir(objdir)
                os.chdir(objdir)
            except OSError as err:
                raise EasyBuildError("Failed to create separate build dir %s in %s: %s", objdir, os.getcwd(), err)
            default_srcdir = self.cfg['start_dir']

        if srcdir is None:
            if self.cfg.get('srcdir', None) is not None:
                srcdir = self.cfg['srcdir']
            else:
                srcdir = default_srcdir

        options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]

        env_to_options = {
            'CC': 'CMAKE_C_COMPILER',
            'CFLAGS': 'CMAKE_C_FLAGS',
            'CXX': 'CMAKE_CXX_COMPILER',
            'CXXFLAGS': 'CMAKE_CXX_FLAGS',
            'F90': 'CMAKE_Fortran_COMPILER',
            'FFLAGS': 'CMAKE_Fortran_FLAGS',
        }
        for env_name, option in env_to_options.items():
            value = os.getenv(env_name)
            if value is not None:
                options.append("-D%s='%s'" % (option, value))

        if build_option('rpath'):
            # instruct CMake not to fiddle with RPATH when --rpath is used, since it will undo stuff on install...
            # https://github.com/LLNL/spack/blob/0f6a5cd38538e8969d11bd2167f11060b1f53b43/lib/spack/spack/build_environment.py#L416
            options.append('-DCMAKE_SKIP_RPATH=ON')

        # show what CMake is doing by default
        options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')

        options_string = ' '.join(options)

        command = "%s cmake %s %s %s" % (self.cfg['preconfigopts'], srcdir, options_string, self.cfg['configopts'])
        (out, _) = run_cmd(command, log_all=True, simple=False)

        return out
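# Hedged sketch of an easyconfig that opts into the parameters defined by the
# easyblock above. Only 'easyblock', 'separate_build_dir' and 'configopts'
# come from the code; the package name, version, homepage, and toolchain
# below are hypothetical placeholders.
easyblock = 'CMakeMake'

name = 'example'
version = '1.0'

homepage = 'https://example.org'
description = "Example package configured with CMake"

toolchain = {'name': 'foss', 'version': '2017a'}

# configure in <builddir>/easybuild_obj instead of in the source tree
separate_build_dir = True

# extra flags appended to the generated cmake command line
configopts = '-DBUILD_SHARED_LIBS=ON'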
# coding=utf-8
from __future__ import unicode_literals

"""
Name:        MyArgparse
Author:      Andy Liu
Email :      [email protected]
Created:     3/26/2015
Copyright:   All rights reserved.
Licence:     This program is free software: you can redistribute it and/or modify
             it under the terms of the GNU General Public License as published by
             the Free Software Foundation, either version 3 of the License, or
             (at your option) any later version.

             This program is distributed in the hope that it will be useful, but
             WITHOUT ANY WARRANTY; without even the implied warranty of
             MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
             General Public License for more details.

             You should have received a copy of the GNU General Public License
             along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging


def parse_command_line():
    parser = argparse.ArgumentParser(prog='PROG', description='%(prog)s can ...')

    parser.add_argument('NoPre', action="store", help='help information')
    parser.add_argument('-t', action="store_true", dest='boolean_switch', default=False,
                        help='Set a switch to true')
    parser.add_argument('-f', action="store_false", dest='boolean_switch', default=True,
                        help='Set a switch to false')
    parser.add_argument('-s', action="store", dest='simple_value',
                        help="Store a simple value")
    parser.add_argument('-st', action="store", dest="simple_value", type=int,
                        help='Store a simple value and define type')
    parser.add_argument('-c', action='store_const', dest='constant_value', const='value-to-store',
                        help='Store a constant value')
    parser.add_argument('-a', action='append', dest='collection', default=[],
                        help='Add repeated values to a list')
    parser.add_argument('-A', action='append_const', dest='const_collection',
                        const='value-1-to-append', default=[],
                        help='Add different values to list')
    parser.add_argument('-B', action='append_const', dest='const_collection',
                        const='value-2-to-append',
                        help='Add different values to list')

    args = parser.parse_args()

    logging.debug('NoPre            = %r' % args.NoPre)
    logging.debug('simple_value     = %r' % args.simple_value)
    logging.debug('constant_value   = %r' % args.constant_value)
    logging.debug('boolean_switch   = %r' % args.boolean_switch)
    logging.debug('collection       = %r' % args.collection)
    logging.debug('const_collection = %r' % args.const_collection)

    return args


if __name__ == '__main__':
    from MyLog import init_logger

    logger = init_logger()
    parse_command_line()
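# Hedged usage sketch: invoking the demo above from a shell (the module file
# name 'MyArgparse.py' is assumed from its docstring header):
#
#     $ python MyArgparse.py positional -t -s hello -a one -a two -A -B
#
# would parse to roughly:
#     NoPre='positional', boolean_switch=True, simple_value='hello',
#     constant_value=None, collection=['one', 'two'],
#     const_collection=['value-1-to-append', 'value-2-to-append']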
from django.contrib import admin

from trainer.models import Language, Word, Card, Set

admin.site.register(Language)
admin.site.register(Word)
admin.site.register(Card)
admin.site.register(Set)
    # Wait for the end-point registration or to quit
    event_first_of(
        event_healthy,
        event_stop,
    ).wait()

    while True:
        data_or_stop.wait()

        if event_stop.is_set():
            return

        # The queue is not empty at this point, so this won't raise Empty.
        # This task being the only consumer is a requirement.
        data = queue.peek(block=False)

        backoff = timeout_exponential_backoff(
            message_retries,
            message_retry_timeout,
            message_retry_max_timeout,
        )

        acknowledged = retry_with_recovery(
            protocol,
            data,
            receiver_address,
            event_stop,
            event_healthy,
            event_unhealthy,
            backoff,
        )

        if acknowledged:
            queue.get()

            # Checking the length of the queue does not trigger a
            # context-switch, so it's safe to assume the length of the queue
            # won't change under our feet and when a new item will be added the
            # event will be set again.
            if not queue:
                data_or_stop.clear()

                if event_stop.is_set():
                    return


def healthcheck(
        protocol,
        receiver_address,
        event_stop,
        event_healthy,
        event_unhealthy,
        nat_keepalive_retries,
        nat_keepalive_timeout,
        nat_invitation_timeout,
        ping_nonce):

    """ Sends a periodic Ping to `receiver_address` to check its health. """
    # The state of the node is unknown, the events are set to allow the tasks
    # to do work.
    protocol.set_node_network_state(
        receiver_address,
        NODE_NETWORK_UNKNOWN,
    )

    # Always call `clear` before `set`, since only `set` does context-switches
    # it's easier to reason about tasks that are waiting on both events.

    # Wait for the end-point registration or for the node to quit
    try:
        protocol.get_host_port(receiver_address)
    except UnknownAddress:
        event_healthy.clear()
        event_unhealthy.set()

        backoff = timeout_exponential_backoff(
            nat_keepalive_retries,
            nat_keepalive_timeout,
            nat_invitation_timeout,
        )
        sleep = next(backoff)

        while not event_stop.wait(sleep):
            try:
                protocol.get_host_port(receiver_address)
            except UnknownAddress:
                sleep = next(backoff)
            else:
                break

    # Don't wait to send the first Ping and to start sending messages if the
    # endpoint is known
    sleep = 0
    event_unhealthy.clear()
    event_healthy.set()

    while not event_stop.wait(sleep):
        sleep = nat_keepalive_timeout

        ping_nonce['nonce'] += 1
        data = protocol.get_ping(
            ping_nonce['nonce'],
        )

        # Send Ping a few times before setting the node as unreachable
        acknowledged = retry(
            protocol,
            data,
            receiver_address,
            event_stop,
            [nat_keepalive_timeout] * nat_keepalive_retries,
        )

        if event_stop.is_set():
            return

        if not acknowledged:
            # The node is not healthy, clear the event to stop all queue
            # tasks
            protocol.set_node_network_state(
                receiver_address,
                NODE_NETWORK_UNREACHABLE,
            )
            event_healthy.clear()
            event_unhealthy.set()

            # Retry until recovery, used for:
            # - Checking node status.
            # - Nat punching.
            acknowledged = retry(
                protocol,
                data,
                receiver_address,
                event_stop,
                repeat(nat_invitation_timeout),
            )

        if acknowledged:
            event_unhealthy.clear()
            event_healthy.set()
            protocol.set_node_network_state(
                receiver_address,
                NODE_NETWORK_REACHABLE,
            )


class RaidenProtocol(object):
    """ Encode the message into a packet and send it.

    Each message received is stored by hash and if it is received twice the
    previous answer is resent.

    Repeat sending messages until an acknowledgment is received or the maximum
    number of retries is hit.
    """

    def __init__(
            self,
            transport,
            discovery,
            raiden,
            retry_interval,
            retries_before_backoff,
            nat_keepalive_retries,
            nat_keepalive_timeout,
            nat_invitation_timeout):

        self.transport = transport
        self.discovery = discovery
        self.raiden = raiden

        self.retry_interval = retry_interval
        self.retries_before_backoff = retries_before_backoff
        self.nat_keepalive_retries = nat_keepalive_retries
        self.nat_keepalive_timeout = nat_keepalive_timeout
        self.nat_invitation_timeout = nat_invitation_timeout

        self.event_stop = Event()

        self.channel_queue = dict()  # TODO: Change keys to the channel address
        self.greenlets = list()
        self.addresses_events = dict()
        self.nodeaddresses_networkstatuses = defaultdict(lambda: NODE_NETWORK_UNKNOWN)

        # Maps the echohash of received and *successfully* processed messages
        # to its Ack, used to ignore duplicate messages and resend the Ack.
        self.receivedhashes_to_acks = dict()

        # Maps the echohash to a SentMessageState
        self.senthashes_to_states = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

    def start(self):
        self.transport.start()

    def stop_and_wait(self):
        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.transport.stop_accepting()

        # Stop processing the outgoing queues
        self.event_stop.set()
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.transport.stop()

        # Set all the pending results to False
        for waitack in self.senthashes_to_states.itervalues():
            waitack.async_result.set(False)

    def get_health_events(self, receiver_address):
        """ Starts a healthcheck task for `receiver_address` and returns a
        HealthEvents with locks to react on its current state.
        """
        if receiver_address not in self.addresses_events:
            self.start_health_check(receiver_address)

        return self.addresses_events[receiver_address]

    def start_health_check(self, receiver_address):
        """ Starts a task for healthchecking `receiver_address` if there is
        not one yet.
        """
        if receiver_address not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                receiver_address,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = HealthEvents(
                event_healthy=Event(),
                event_unhealthy=Event(),
            )

            self.addresses_events[receiver_address] = events

            self.greenlets.append(gevent.spawn(
                healthcheck,
                self,
                receiver_address,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            ))
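# Hedged sketch (not from this file): one plausible shape for the
# `timeout_exponential_backoff` generator consumed above -- yield the base
# timeout `retries` times, then keep doubling it, capped at `maximum`.
# The real implementation in this codebase may differ.
def timeout_exponential_backoff_sketch(retries, timeout, maximum):
    yielded = 0
    # flat phase: the plain retry timeout, `retries` times
    while yielded < retries:
        yield timeout
        yielded += 1

    # backoff phase: double until the cap is reached
    while timeout < maximum:
        timeout = min(timeout * 2, maximum)
        yield timeout

    # steady state: stay at the cap forever
    while True:
        yield maximum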
# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy)
# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.

'''default config for webservice'''

DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False

print('default config loaded')
            convertPrio(cal.vtodo.priority.value) if hasattr(cal.vtodo, 'priority') else None
        }

        fakePOST = QueryDict(mutable=True)
        fakePOST.update(params)
        form = SimpleTickets(fakePOST)
        if form.is_valid():
            cd = form.cleaned_data
            ticket = get_ticket_model()
            # change ticket
            try:
                tic = ticket.objects.get(uuid=cal.vtodo.uid.value)
                tic.caption = cd['caption']
                tic.description = cd['description']
                tic.priority = cd['priority']
                # tic.assigned = cd['assigned']
                tic.show_start = cd['show_start']
                tic.save(user=request.user)

            # new ticket
            except ticket.DoesNotExist:
                tic = ticket()
                tic.caption = cd['caption']
                tic.description = cd['description']
                if 'priority' not in cd or not cd['priority']:
                    if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_PRIORITY') and settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY:
                        tic.priority_id = settings.KEEP_IT_SIMPLE_DEFAULT_PRIORITY
                else:
                    tic.priority = cd['priority']
                tic.assigned = request.user
                if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_CUSTOMER') and settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER:
                    if settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER == -1:
                        tic.customer = request.organisation
                    else:
                        tic.customer_id = settings.KEEP_IT_SIMPLE_DEFAULT_CUSTOMER
                if hasattr(settings, 'KEEP_IT_SIMPLE_DEFAULT_COMPONENT') and settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT:
                    tic.component_id = settings.KEEP_IT_SIMPLE_DEFAULT_COMPONENT
                tic.show_start = cd['show_start']
                tic.uuid = cal.vtodo.uid.value
                tic.save(user=request.user)

            if tic.assigned:
                touch_ticket(tic.assigned, tic.pk)

            for ele in form.changed_data:
                form.initial[ele] = ''
            remember_changes(request, form, tic)

            touch_ticket(request.user, tic.pk)

            mail_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_MAIL_RCPT, is_api=True)
            jabber_ticket(request, tic.pk, form, rcpt=settings.TICKET_NEW_JABBER_RCPT, is_api=True)

        else:
            raise Exception(form.errors)

    def remove(self, name):
        pass

    def replace(self, name, text):
        self.append(name, text)

    @property
    def text(self):
        return ical.serialize(self.tag, self.headers, self.items.values())

    @classmethod
    def children(cls, path):
        """Yield the children of the collection at local ``path``."""
        request = cls._getRequestFromUrl(path)
        children = list(tickets_reports.objects.filter(active_record=True, c_user=request.user)
                        .values_list('slug', flat=True))
        children = ['%s/%s.ics' % (request.user.username, itm) for itm in children]
        return map(cls, children)

    @classmethod
    def is_node(cls, path):
        """Return ``True`` if relative ``path`` is a node.

        A node is a WebDAV collection whose members are other collections.
        """
        request = cls._getRequestFromUrl(path)
        if path == request.user.username:
            return True
        else:
            return False

    @classmethod
    def is_leaf(cls, path):
        """Return ``True`` if relative ``path`` is a leaf.

        A leaf is a WebDAV collection whose members are not collections.
        """
        result = False
        if '.ics' in path:
            try:
                request = cls._getRequestFromUrl(path)
                rep = tickets_reports.objects.get(active_record=True, pk=cls._getReportFromUrl(path))
                tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority',
                                                                'customer').all()
                search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
                result = (tic.exists())
            except Exception:
                import sys
                a = sys.exc_info()
        return result

    @property
    def last_modified(self):
        try:
            request = self._getRequestFromUrl(self.path)
            rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
            tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority',
                                                            'customer').all()
            search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
            date = tic.latest('u_date')
            return datetime.strftime(date.last_action_date, '%a, %d %b %Y %H:%M:%S %z')
        except Exception:
            import sys
            a = sys.exc_info()

    @property
    def tag(self):
        with self.props as props:
            if 'tag' not in props:
                props['tag'] = 'VCALENDAR'
            return props['tag']

    @property
    @contextmanager
    def props(self):
        # On enter
        properties = {}
        try:
            props = DBProperties.objects.get(path=self.path)
        except DBProperties.DoesNotExist:
            pass
        else:
            properties.update(json.loads(props.text))
        old_properties = properties.copy()
        yield properties
        # On exit
        if old_properties != properties:
            props, created = DBProperties.objects.get_or_create(path=self.path)
            props.text = json.dumps(properties)
            props.save()

    @property
    def items(self):
        itms = {}
        try:
            request = self._getRequestFromUrl(self.path)
            if self.path == request.user.username:
                return itms
            rep = tickets_reports.objects.get(active_record=True, pk=self._getReportFromUrl(self.path))
            tic = get_ticket_model().objects.select_related('type', 'state', 'assigned', 'priority',
                                                            'customer').all()
            search_params, tic = build_ticket_search_ext(request, tic, json.loads(rep.search))
            for item in tic:
                text = self._itemToICal(item)
                itms.update(self._parse(text, ICAL_TYPES))
        except Exception:
            import sys
            a = sys.exc_info()
        return itms

    @classmethod
    def _getRequestFromUrl(cls, path):
        user = path.split('/')[0]
        request = FakeRequest()
        request.user = User.objects.get(username=user)
        request.organisation = UserProfile.objects.get(user=request.user).organisation
        return request

    @classmethod
    def _getReportFromUrl(cls, path):
        if '.ics' in path:
            file = path.split('/')[-1]
            file = file.replace('.ics', '')
            repid = tickets_reports.objects.get(active_record=True, slug=file).pk
            return repid
        return 0

    @classmethod
    def _itemToICal(cls, item):
        cal = vobject.iCalendar()
        cal.add('vtodo')
        cal.vtodo.add('summary').value = item.caption
        cal.vtodo.add('uid').value = str(item.uuid)
        cal.vtodo.add('created').value = item.c_date
        if item.closed:
            cal.vtodo.add('status').value = 'COMPLETED'
        if item.priority:
            cal.vtodo.add('priority').value = str(item.priority.caldav)
        else:
            cal.vtodo.add('priority').value = '0'
        if item.description:
            cal.vtodo.add('description').value = item.description
        if item.show_start:
            # cal.vtodo.add('dstart').value = item.show_start
            cal.vtodo.add('due').value = item.show_start
        cal.vtodo.add('vala
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@

from mcfw.properties import bool_property, unicode_list_property, unicode_property, typed_property


class BankDataTO(object):
    bankCode = unicode_property('bankCode')
    name = unicode_property('name')
    bic = unicode_property('bic')


class OpenIbanResultTO(object):
    valid = bool_property('valid')
    messages = unicode_list_property('message')
    iban = unicode_property('iban')
    bankData = typed_property('bankData', BankDataTO)  # type: BankDataTO
    checkResults = typed_property('checkResults', dict)
# -*- coding: utf-8 -*-

# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from .module import CmsoModule

__all__ = ['CmsoModule']
        if self.status == self.INACTIVE:
            # If these fields have already been changed, don't
            # override those changes. Don't unset the name field
            # if no further data is available.
            if self.name == self.feed_url:
                self.name = video_iter.title or self.name
            if not self.webpage:
                self.webpage = video_iter.webpage or ''
            if not self.description:
                self.description = video_iter.description or ''
            self.save()

        super(Feed, self).update(video_iter, source_import=feed_import, **kwargs)

    def source_type(self):
        return self.calculated_source_type

    def _calculate_source_type(self):
        video_service = self.video_service()
        if video_service is None:
            return u'Feed'
        else:
            return u'User: %s' % video_service

    def video_service(self):
        for service, regexp in VIDEO_SERVICE_REGEXES:
            if re.search(regexp, self.feed_url, re.I):
                return service


def pre_save_set_calculated_source_type(instance, **kwargs):
    # Always save the calculated_source_type
    instance.calculated_source_type = instance._calculate_source_type()
    # Plus, if the name changed, we have to recalculate all the Videos
    # that depend on us.
    try:
        v = Feed.objects.get(id=instance.id)
    except Feed.DoesNotExist:
        return instance
    if v.name != instance.name:
        # recalculate all the sad little videos' calculated_source_type
        for vid in instance.video_set.all():
            vid.save()
models.signals.pre_save.connect(pre_save_set_calculated_source_type, sender=Feed)


class Category(MPTTModel):
    """
    A category for videos to be contained in.

    Categories and tags aren't too different functionally, but categories are
    more strict as they can't be defined by visitors. Categories can also be
    hierarchical.

    Fields:
     - site: A link to the django.contrib.sites.models.Site object this object
       is bound to
     - name: Name of this category
     - slug: a slugified version of the name, used to create more friendly URLs
     - logo: An image to associate with this category
     - description: human readable description of this item
     - parent: Reference to another Category. Allows you to have hierarchical
       categories.
    """
    site = models.ForeignKey(Site)
    name = models.CharField(
        max_length=80, verbose_name='Category Name',
        help_text=_("The name is used to identify the category almost "
                    "everywhere; for example, under a video or in a "
                    "category widget."))
    slug = models.SlugField(
        verbose_name='Category Slug',
        help_text=_("The \"slug\" is the URL-friendly version of the name. It "
                    "is usually lower-case and contains only letters, numbers "
                    "and hyphens."))
    logo = models.ImageField(
        upload_to=utils.UploadTo('localtv/category/logo/%Y/%m/%d/'),
        blank=True, verbose_name='Thumbnail/Logo',
        help_text=_("Optional. For example: a leaf for 'environment' or the "
                    "logo of a university department."))
    description = models.TextField(
        blank=True, verbose_name='Description (HTML)',
        help_text=_("Optional. The description is not prominent by default, "
                    "but some themes may show it."))
    parent = models.ForeignKey(
        'self', blank=True, null=True,
        related_name='child_set',
        verbose_name='Category Parent',
        help_text=_("Categories, unlike tags, can have a hierarchy."))

    class MPTTMeta:
        order_insertion_by = ['name']

    class Meta:
        unique_together = (
            ('slug', 'site'),
            ('name', 'site'))

    def __unicode__(self):
        return self.name

    def dashes(self):
        """
        Returns a string of em dashes equal to the :class:`Category`\ 's
        level. This is used to indent the category name in the admin
        templates.
        """
        return mark_safe('&mdash;' * self.level)

    @models.permalink
    def get_absolute_url(self):
        return ('localtv_category', [self.slug])

    def approved_set(self):
        """
        Returns active videos for the category and its subcategories, ordered
        by decreasing best date.
        """
        opts = self._mptt_meta

        lookups = {
            'status': Video.ACTIVE,
            'categories__left__gte': getattr(self, opts.left_attr),
            'categories__left__lte': getattr(self, opts.right_attr),
            'categories__tree_id': getattr(self, opts.tree_id_attr)
        }
        lookups = self._tree_manager._translate_lookups(**lookups)
        return Video.objects.filter(**lookups).distinct()
    approved_set = property(approved_set)

    def unique_error_message(self, model_class, unique_check):
        return 'Category with this %s already exists.' % (unique_check[0],)


class SavedSearch(Source):
    """
    A set of keywords to regularly pull in new videos from.

    There's an administrative interface for doing "live searches"

    Fields:
     - site: site this savedsearch applies to
     - query_string: a whitespace-separated list of words to search for. Words
       starting with a dash will be processed as negative query terms
     - when_created: date and time that this search was saved.
    """
    query_string = models.TextField()
    when_created = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.query_string

    def update(self, **kwargs):
        """
        Fetch and import new videos from this search.
        """
        try:
            SearchImport.objects.get(source=self, status=SearchImport.STARTED)
        except SearchImport.DoesNotExist:
            pass
        else:
            logging.info('Skipping import of %s: already in progress' % self)
            return

        search_import = SearchImport.objects.create(
            source=self,
            auto_approve=self.auto_approve
        )

        searches = vidscraper.auto_search(
            self.query_string,
            max_results=100,
            api_keys=lsettings.API_KEYS,
        )

        video_iters = []
        for video_iter in searches:
            try:
                video_iter.load()
            except Exception:
                search_import.handle_error(u'Skipping import of search results '
                                           u'from %s' % video_iter.__class__.__name__,
                                           with_exception=True)
                continue
            video_iters.append(video_iter)

        if video_iters:
            super(SavedSearch, self).update(itertools.chain(*video_iters),
                                            source_import=search_import,
                                            **kwargs)
        else:
            # Mark the import as failed if none of the searches could load.
            search_import.fail("All searches failed for {source}",
                               with_exception=False)

    def source_type(self):
        return u'Search'


class SourceImportIndex(models.Model):
    video = models.OneToOneField('Video', unique=True)
    index = models.PositiveIntegerField(blank=True, null=True)

    class Meta:
        abstract = True


class FeedImportIndex(SourceImportIndex):
    source_import = models.ForeignKey('FeedImport', related_name='indexes')


class SearchImportIndex(SourceImportIndex):
    source_import = models.ForeignKey('SearchImport', related_name='indexes')


class SourceImportError(models.Model):
    message = models.TextField()
    traceback = models.TextField(blank=True)
    is_skip = models.BooleanField(help_text="Whether this error represents a "
                                            "video that was skipped.")
    datetime = models.DateTimeField(auto_now_add=True)

    class Meta:
        abstract = True


class FeedImportError(SourceImportError):
from rest_framework import exceptions as drf_exceptions
from rest_framework import versioning as drf_versioning
from rest_framework.compat import unicode_http_header
from rest_framework.utils.mediatypes import _MediaType

from api.base import exceptions
from api.base import utils
from api.base.renderers import BrowsableAPIRendererNoForms
from api.base.settings import LATEST_VERSIONS


def get_major_version(version):
    return int(version.split('.')[0])


def url_path_version_to_decimal(url_path_version):
    # 'v2' --> '2.0'
    return str(float(url_path_version.split('v')[1]))


def decimal_version_to_url_path(decimal_version):
    # '2.0' --> 'v2'
    return 'v{}'.format(get_major_version(decimal_version))


def get_latest_sub_version(major_version):
    # '2' --> '2.6'
    return LATEST_VERSIONS.get(major_version, None)


class BaseVersioning(drf_versioning.BaseVersioning):

    def __init__(self):
        super(BaseVersioning, self).__init__()

    def get_url_path_version(self, kwargs):
        invalid_version_message = 'Invalid version in URL path.'
        version = kwargs.get(self.version_param)
        if version is None:
            return self.default_version
        version = url_path_version_to_decimal(version)
        if not self.is_allowed_version(version):
            raise drf_exceptions.NotFound(invalid_version_message)
        if get_major_version(version) == get_major_version(self.default_version):
            return self.default_version
        return version

    def get_header_version(self, request, major_version):
        invalid_version_message = 'Invalid version in "Accept" header.'
        media_type = _MediaType(request.accepted_media_type)
        version = media_type.params.get(self.version_param)
        if not version:
            return None
        if version == 'latest':
            return get_latest_sub_version(major_version)
        version = unicode_http_header(version)
        if not self.is_allowed_version(version):
            raise drf_exceptions.NotAcceptable(invalid_version_message)
        return version

    def get_default_version(self, request, major_version):
        """Returns the latest available version for the browsable api, otherwise REST_FRAMEWORK default version"""
        if request.accepted_renderer.__class__ == BrowsableAPIRendererNoForms:
            return get_latest_sub_version(major_version)
        return self.default_version

    def get_query_param_version(self, request, major_version):
        invalid_version_message = 'Invalid version in query parameter.'
        version = request.query_params.get(self.version_param)
        if not version:
            return None
        if version == 'latest':
            return get_latest_sub_version(major_version)
        if not self.is_allowed_version(version):
            raise drf_exceptions.NotFound(invalid_version_message)
        return version

    def validate_pinned_versions(self, url_path_version, header_version, query_parameter_version):
        url_path_major_version = get_major_version(url_path_version)
        header_major_version = get_major_version(header_version) if header_version else None
        query_major_version = get_major_version(query_parameter_version) if query_parameter_version else None
        if header_version and header_major_version != url_path_major_version:
            raise exceptions.Conflict(
                detail='Version {} specified in "Accept" header does not fall within URL path version {}'.format(
                    header_version,
                    url_path_version,
                ),
            )
        if query_parameter_version and query_major_version != url_path_major_version:
            raise exceptions.Conflict(
                detail='Version {} specified in query parameter does not fall within URL path version {}'.format(
                    query_parameter_version,
                    url_path_version,
                ),
            )
        if header_version and query_parameter_version and (header_version != query_parameter_version):
            raise exceptions.Conflict(
                detail='Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
                    header_version,
                    query_parameter_version,
                ),
            )

    def determine_version(self, request, *args, **kwargs):
        url_path_version = self.get_url_path_version(kwargs)
        major_version = get_major_version(url_path_version)

        header_version = self.get_header_version(request, major_version)
        query_parameter_version = self.get_query_param_version(request, major_version)

        version = url_path_version
        if header_version or query_parameter_version:
            self.validate_pinned_versions(url_path_version, header_version, query_parameter_version)
            version = header_version if header_version else query_parameter_version
        else:
            version = self.get_default_version(request, major_version)
        return version

    def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
        url_path_version = self.get_url_path_version(kwargs)
        major_version = get_major_version(url_path_version)
        query_parameter_version = self.get_query_param_version(request, major_version)

        kwargs = {} if (kwargs is None) else kwargs
        kwargs[self.version_param] = decimal_version_to_url_path(url_path_version)
        query_kwargs = {'version': query_parameter_version} if query_parameter_version else None

        return utils.absolute_reverse(
            viewname, query_kwargs=query_kwargs, args=args, kwargs=kwargs,
        )
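# Hedged illustration (not part of the module above) of the conversion
# helpers defined at the top of this file:
#
#     url_path_version_to_decimal('v2')   # -> '2.0'
#     decimal_version_to_url_path('2.6')  # -> 'v2'
#     get_major_version('2.6')            # -> 2
#
# determine_version() then resolves a request against the URL path version,
# raising Conflict when a pinned "Accept" header or query-parameter version
# names a different major version than the URL path does.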
# -*- coding: utf-8 -*-

from module.plugins.internal.XFSAccount import XFSAccount


class FilerioCom(XFSAccount):
    __name__ = "FilerioCom"
    __type__ = "account"
    __version__ = "0.07"
    __status__ = "testing"

    __description__ = """FileRio.in account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "[email protected]")]

    PLUGIN_DOMAIN = "filerio.in"
import pytest

from ray.train.callbacks.results_preprocessors import (
    ExcludedKeysResultsPreprocessor,
    IndexedResultsPreprocessor,
    SequentialResultsPreprocessor,
    AverageResultsPreprocessor,
    MaxResultsPreprocessor,
    WeightedAverageResultsPreprocessor,
)


def test_excluded_keys_results_preprocessor():
    results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
    expected = [{"b": 2}, {"b": 4}]

    preprocessor = ExcludedKeysResultsPreprocessor("a")
    preprocessed_results = preprocessor.preprocess(results)

    assert preprocessed_results == expected


def test_indexed_results_preprocessor():
    results = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
    expected = [{"a": 1}, {"a": 3}]

    preprocessor = IndexedResultsPreprocessor([0, 2])
    preprocessed_results = preprocessor.preprocess(results)

    assert preprocessed_results == expected


def test_sequential_results_preprocessor():
    results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    expected = [{"b": 2}, {"b": 6}]

    preprocessor_1 = ExcludedKeysResultsPreprocessor("a")
    # [{"b": 2}, {"b": 4}, {"b": 6}, {"b": 8}]
    preprocessor_2 = IndexedResultsPreprocessor([0, 2])

    preprocessor = SequentialResultsPreprocessor([preprocessor_1, preprocessor_2])
    preprocessed_results = preprocessor.preprocess(results)

    assert preprocessed_results == expected


def test_average_results_preprocessor():
    from copy import deepcopy

    import numpy as np

    results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    expected = deepcopy(results)
    for res in expected:
        res.update(
            {
                "avg(a)": np.mean([result["a"] for result in results]),
                "avg(b)": np.mean([result["b"] for result in results]),
            }
        )

    preprocessor = AverageResultsPreprocessor(["a", "b"])
    preprocessed_results = preprocessor.preprocess(results)

    assert preprocessed_results == expected


def test_max_results_preprocessor():
    from copy import deepcopy

    import numpy as np

    results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    expected = deepcopy(results)
    for res in expected:
        res.update(
            {
                "max(a)": np.max([result["a"] for result in results]),
                "max(b)": np.max([result["b"] for result in results]),
            }
        )

    preprocessor = MaxResultsPreprocessor(["a", "b"])
    preprocessed_results = preprocessor.preprocess(results)

    assert preprocessed_results == expected


def test_weighted_average_results_preprocessor():
    from copy import deepcopy

    import numpy as np

    results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
    expected = deepcopy(results)
    total_weight = np.sum([result["b"] for result in results])
    for res in expected:
        res.update(
            {
                "weight_avg_b(a)": np.sum(
                    [result["a"] * result["b"] / total_weight for result in results]
                )
            }
        )

    preprocessor = WeightedAverageResultsPreprocessor(["a"], "b")
    preprocessed_results = preprocessor.preprocess(results)

    assert preprocessed_results == expected


@pytest.mark.parametrize(
    ("results_preprocessor", "expected_value"),
    [(AverageResultsPreprocessor, 2.0), (MaxResultsPreprocessor, 3.0)],
)
def test_warning_in_aggregate_results_preprocessors(
    caplog, results_preprocessor, expected_value
):
    import logging
    from copy import deepcopy

    from ray.util import debug

    caplog.at_level(logging.WARNING)

    results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
    results2 = [{"a": 1}, {"a": "invalid"}, {"a": 3}, {"a": "invalid"}]
    results3 = [{"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}, {"a": "invalid"}]
    results4 = [{"a": 1}, {"a": 2}, {"a": 3}, {"c": 4}]

    # test case 1: metric key `b` is missing from all workers
    results_preprocessor1 = results_preprocessor(["b"])
    results_preprocessor1.preprocess(results1)
    assert "`b` is not reported from workers, so it is ignored." in caplog.text

    # test case 2: some values of key `a` have invalid data type
    results_preprocessor2 = results_preprocessor(["a"])
    expected2 = deepcopy(results2)
    aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
    for res in expected2:
        res.update({aggregation_key: expected_value})
    assert results_preprocessor2.preprocess(results2) == expected2

    # test case 3: all key `a` values are invalid
    results_preprocessor2.preprocess(results3)
    assert "`a` value type is not valid, so it is ignored." in caplog.text

    # test case 4: some workers don't report key `a`
    expected4 = deepcopy(results4)
    aggregation_key = results_preprocessor2.aggregate_fn.wrap_key("a")
    for res in expected4:
        res.update({aggregation_key: expected_value})
    assert results_preprocessor2.preprocess(results4) == expected4

    for record in caplog.records:
        assert record.levelname == "WARNING"

    debug.reset_log_once("b")
    debug.reset_log_once("a")


def test_warning_in_weighted_average_results_preprocessors(caplog):
    import logging
    from copy import deepcopy

    caplog.at_level(logging.WARNING)

    results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
    results2 = [{"b": 1}, {"b": 2}, {"b": 3}, {"b": 4}]
    results3 = [
        {"a": 1, "c": 3},
        {"a": 2, "c": "invalid"},
        {"a": "invalid", "c": 1},
        {"a": 4, "c": "invalid"},
    ]
    results4 = [
        {"a": 1, "c": "invalid"},
        {"a": 2, "c": "invalid"},
        {"a": 3, "c": "invalid"},
        {"a": 4, "c": "invalid"},
    ]

    # test case 1: weight key `b` is not reported from all workers
    results_preprocessor1 = WeightedAverageResultsPreprocessor(["a"], "b")
    expected1 = deepcopy(results1)
    for res in expected1:
        res.update({"weight_avg_b(a)": 2.5})
    assert results_preprocessor1.preprocess(results1) == expected1
    assert (
        "Averaging weight `b` is not reported by all workers in `train.report()`."
        in caplog.text
    )
    assert "Use equal weight instead." in caplog.text

    # test case 2: metric key `a` (to be averaged) is not reported from all workers
    results_preprocessor1.preprocess(results2)
    assert "`a` is not reported from workers, so it is ignored." in caplog.text

    # test case 3: both metric and weight keys have invalid data type
    results_preprocessor2 = WeightedAverageResultsPreprocessor(["a"], "c")
    expected3 = deepcopy(results3)
    for res in expected3:
        res.update({"weight_avg_c(a)": 1.0})
    assert results_preprocessor2.preprocess(results3) == expected3

    # test case 4: all weight values are invalid
    expected4 = deepcopy(results4)
    for res in expected4:
        res.update({"weight_avg_c(a)": 2.5})
    assert results_preprocessor2.preprocess(results4) == expected4
    assert "Averaging weight `c` value type is not valid." in caplog.text

    for record in caplog.records:
        assert record.levelname == "WARNING"


if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(["-v", "-x", __file__]))
import re

from django.core.exceptions import ImproperlyConfigured
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker

from tranquil.models import Importer

__all__ = (
    'engine',
    'meta',
    'Session',
)


class EngineCache(object):
    __shared_state = dict(
        engine=None,
        meta=None,
        Session=None,
    )

    _mappings = {
        'sqlite3': 'sqlite',
        'mysql': 'mysql',
        'postgresql': 'postgresql',
        'postgresql_psycopg2': 'postgresql+psycopg2',
        'oracle': 'oracle',
    }

    def __init__(self):
        from django.conf import settings
        self.__dict__ = self.__shared_state
        if self.engine is not None:
            return
        if settings.DATABASE_ENGINE == 'django_sqlalchemy.backend':
            from django_sqlalchemy import backend
            self.engine = backend.engine
        else:
            options = {
                'protocol': self._mappings.get(settings.DATABASE_ENGINE),
                'name': settings.DATABASE_NAME,
                'user': settings.DATABASE_USER,
                'pass': settings.DATABASE_PASSWORD,
                'host': settings.DATABASE_HOST,
                'port': settings.DATABASE_PORT,
            }
            if options['protocol'] is None:
                raise ImproperlyConfigured('Unknown database engine: %s' % settings.DATABASE_ENGINE)
            url = '{protocol}://{user}:{pass}@{host}{port}/{name}'
            for p in options:
                if p == 'port' and len(options[p]) > 0:
                    url = re.sub('{%s}' % p, ':%s' % options[p], url)
                else:
                    url = re.sub('{%s}' % p, options[p], url)
            self.engine = create_engine(url)
        self.meta = MetaData(bind=self.engine, reflect=True)
        self.Session = sessionmaker(bind=self.engine, autoflush=True, autocommit=False)
        self.importer = Importer(self.meta)


cache = EngineCache()
engine = cache.engine
meta = cache.meta
Session = cache.Session
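# Hedged illustration of the URL construction above (hypothetical settings,
# not from this module): with
#     DATABASE_ENGINE = 'postgresql_psycopg2', DATABASE_NAME = 'app',
#     DATABASE_USER = 'u', DATABASE_PASSWORD = 'p',
#     DATABASE_HOST = 'localhost', DATABASE_PORT = '5432'
# the template expands to:
#     postgresql+psycopg2://u:p@localhost:5432/app
# An empty DATABASE_PORT drops the ':port' part entirely, giving:
#     postgresql+psycopg2://u:p@localhost/app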
""" Tests outgoing calls created with InitialAudio and/or InitialVideo, and exposing the initial contents of incoming calls as values of InitialAudio and InitialVideo """ import operator from servicetest import ( assertContains, assertEquals, assertLength, wrap_channel, EventPattern, call_async, make_channel_proxy) from jingletest2 import JingleTest2, test_all_dialects import constants as cs def outgoing(jp, q, bus, conn, stream): remote_jid = '[email protected]/beyond' jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid) jt.prepare() self_handle = conn.GetSelfHandle() remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0] rccs = conn.Properties.Get(cs.CONN_IFACE_REQUESTS, 'RequestableChannelClasses') media_classes = [ rcc for rcc in rccs if rcc[0][cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_STREAMED_MEDIA ] assertLength(1, media_classes) fixed, allowed = media_classes[0] assertContains(cs.INITIAL_AUDIO, allowed) assertContains(cs.INITIAL_VIDEO, allowed) check_neither(q, conn, bus, stream, remote_handle) check_iav(jt, q, conn, bus, stream, remote_handle, True, False) check_iav(jt, q, conn, bus, stream, remote_handle, False, True) check_iav(jt, q, conn, bus, stream, remote_handle, True, True) def check_neither(q, conn, bus, stream, remote_handle): """ Make a channel without specifying InitialAudio or InitialVideo; check that it's announced with both False, and that they're both present and false in GetAll(). """ path, props = conn.Requests.CreateChannel({ cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA, cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT, cs.TARGET_HANDLE: remote_handle}) assertContains((cs.INITIAL_AUDIO, False), props.items()) assertContains((cs.INITIAL_VIDEO, False), props.items()) chan = wrap_channel(bus.get_object(conn.bus_name, p
ath), cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling']) props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA) assertContains(('InitialAudio', False), props.items()) assertContains(('InitialVideo', False), props.items()) # W
e shouldn't have started a session yet, so there shouldn't be any # session handlers. Strictly speaking, there could be a session handler # with no stream handlers, but... session_handlers = chan.MediaSignalling.GetSessionHandlers() assertLength(0, session_handlers) def check_iav(jt, q, conn, bus, stream, remote_handle, initial_audio, initial_video): """ Make a channel and check that its InitialAudio and InitialVideo properties come out correctly. """ call_async(q, conn.Requests, 'CreateChannel', { cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_STREAMED_MEDIA, cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT, cs.TARGET_HANDLE: remote_handle, cs.INITIAL_AUDIO: initial_audio, cs.INITIAL_VIDEO: initial_video, }) if initial_video and (not jt.jp.can_do_video() or (not initial_audio and not jt.jp.can_do_video_only ())): # Some protocols can't do video event = q.expect('dbus-error', method='CreateChannel') assertEquals(cs.NOT_CAPABLE, event.error.get_dbus_name()) else: path, props = q.expect('dbus-return', method='CreateChannel').value assertContains((cs.INITIAL_AUDIO, initial_audio), props.items()) assertContains((cs.INITIAL_VIDEO, initial_video), props.items()) chan = wrap_channel(bus.get_object(conn.bus_name, path), cs.CHANNEL_TYPE_STREAMED_MEDIA, ['MediaSignalling']) props = chan.Properties.GetAll(cs.CHANNEL_TYPE_STREAMED_MEDIA) assertContains(('InitialAudio', initial_audio), props.items()) assertContains(('InitialVideo', initial_video), props.items()) session_handlers = chan.MediaSignalling.GetSessionHandlers() assertLength(1, session_handlers) path, type = session_handlers[0] assertEquals('rtp', type) session_handler = make_channel_proxy(conn, path, 'Media.SessionHandler') session_handler.Ready() stream_handler_paths = [] stream_handler_types = [] for x in [initial_audio, initial_video]: if x: e = q.expect('dbus-signal', signal='NewStreamHandler') stream_handler_paths.append(e.args[0]) stream_handler_types.append(e.args[2]) if initial_audio: assertContains(cs.MEDIA_STREAM_TYPE_AUDIO, stream_handler_types) if initial_video: assertContains(cs.MEDIA_STREAM_TYPE_VIDEO, stream_handler_types) for x in xrange (0, len(stream_handler_paths)): p = stream_handler_paths[x] t = stream_handler_types[x] sh = make_channel_proxy(conn, p, 'Media.StreamHandler') sh.NewNativeCandidate("fake", jt.get_remote_transports_dbus()) if t == cs.MEDIA_STREAM_TYPE_AUDIO: sh.Ready(jt.get_audio_codecs_dbus()) else: sh.Ready(jt.get_video_codecs_dbus()) sh.StreamState(cs.MEDIA_STREAM_STATE_CONNECTED) e = q.expect('stream-iq', predicate=jt.jp.action_predicate('session-initiate')) jt.parse_session_initiate (e.query) jt.accept() events = reduce(operator.concat, [ [ EventPattern('dbus-signal', signal='SetRemoteCodecs', path=p), EventPattern('dbus-signal', signal='SetStreamPlaying', path=p), ] for p in stream_handler_paths ], []) q.expect_many(*events) chan.Close() def incoming(jp, q, bus, conn, stream): remote_jid = 'skinny.fists@heaven/antennas' jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid) jt.prepare() self_handle = conn.GetSelfHandle() remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0] for a, v in [("audio1", None), (None, "video1"), ("audio1", "video1")]: if v!= None and not jp.can_do_video(): continue if a == None and v != None and not jp.can_do_video_only(): continue jt.incoming_call(audio=a, video=v) e = q.expect('dbus-signal', signal='NewChannels', predicate=lambda e: cs.CHANNEL_TYPE_CONTACT_LIST not in e.args[0][0][1].values()) chans = e.args[0] assertLength(1, chans) path, props = chans[0] 
assertEquals(cs.CHANNEL_TYPE_STREAMED_MEDIA, props[cs.CHANNEL_TYPE]) assertEquals(a != None, props[cs.INITIAL_AUDIO]) assertEquals(v != None, props[cs.INITIAL_VIDEO]) # FIXME: This doesn't check non-Google contacts that can only do one # media type, as such contacts as simulated by JingleTest2 can always # do both. assertEquals(not jp.can_do_video() or not jp.can_do_video_only(), props[cs.IMMUTABLE_STREAMS]) chan = wrap_channel(bus.get_object(conn.bus_name, path), cs.CHANNEL_TYPE_STREAMED_MEDIA) chan.Close() if __name__ == '__main__': test_all_dialects(outgoing) test_all_dialects(incoming)
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, compression_type=CompressionTypes.BZIP2)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_corrupted_bzip2_fails(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      with open(file_name, 'wb') as f:
        f.write(b'corrupt')

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, compression_type=CompressionTypes.BZIP2)
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()

  def test_read_bzip2_concat(self):
    with TempDir() as tempdir:
      bzip2_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with bz2.BZ2File(bzip2_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      bzip2_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with bz2.BZ2File(bzip2_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      bzip2_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with bz2.BZ2File(bzip2_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      final_bzip2_file = tempdir.create_temp_file()
      with open(bzip2_file_name1, 'rb') as src, open(
          final_bzip2_file, 'wb') as dst:
        dst.writelines(src.readlines())

      with open(bzip2_file_name2, 'rb') as src, open(
          final_bzip2_file, 'ab') as dst:
        dst.writelines(src.readlines())

      with open(bzip2_file_name3, 'rb') as src, open(
          final_bzip2_file, 'ab') as dst:
        dst.writelines(src.readlines())

      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_bzip2_file,
          compression_type=beam.io.filesystem.CompressionTypes.BZIP2)

      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      pipeline.run()

  def test_read_deflate(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, 0, CompressionTypes.DEFLATE, True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_corrupted_deflate_fails(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))

      with open(file_name, 'wb') as f:
        f.write(b'corrupt')

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, 0, CompressionTypes.DEFLATE, True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()

  def test_read_deflate_concat(self):
    with TempDir() as tempdir:
      deflate_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with open(deflate_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(zlib.compress(data.encode('utf-8')))

      deflate_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with open(deflate_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(zlib.compress(data.encode('utf-8')))

      deflate_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with open(deflate_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(zlib.compress(data.encode('utf-8')))

      final_deflate_file = tempdir.create_temp_file()
      with open(deflate_file_name1, 'rb') as src, \
           open(final_deflate_file, 'wb') as dst:
        dst.writelines(src.readlines())

      with open(deflate_file_name2, 'rb') as src, \
           open(final_deflate_file, 'ab') as dst:
        dst.writelines(src.readlines())

      with open(deflate_file_name3, 'rb') as src, \
           open(final_deflate_file, 'ab') as dst:
        dst.writelines(src.readlines())

      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_deflate_file,
          compression_type=beam.io.filesystem.CompressionTypes.DEFLATE)

      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))

  def test_read_gzip(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, 0, CompressionTypes.GZIP, True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_corrupted_gzip_fails(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      with open(file_name, 'wb') as f:
        f.write(b'corrupt')

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, 0, CompressionTypes.GZIP, True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))

      with self.assertRaises(Exception):
        pipeline.run()

  def test_read_gzip_concat(self):
    with TempDir() as tempdir:
      gzip_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with gzip.open(gzip_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      gzip_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with gzip.open(gzip_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      gzip_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with gzip.open(gzip_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      final_gzip_file = tempdir.create_temp_file()
      with open(gzip_file_name1, 'rb') as src, \
           open(final_gzip_file, 'wb') as dst:
        dst.writelines(src.readlines())

      with open(gzip_file_name2, 'rb') as src, \
           open(final_gzip_file, 'ab') as dst:
        dst.writelines(src.readlines())

      with open(gzip_file_name3, 'rb') as src, \
           open(final_gzip_file, 'ab') as dst:
        dst.writelines(src.readlines())

      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_gzip_file,
          compression_type=beam.io.filesystem.CompressionTypes.GZIP)

      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))

  def test_read_all_gzip(self):
    _, lines = write_data(100)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = (pipeline
               | Create([file_name])
               | 'ReadAll' >> ReadAllFromText(
                   compression_type=CompressionTypes.GZIP))
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_gzip_large(self):
    _, lines = write_data(10000)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_fil
from django.dispatch import receiver

from pretix.base.signals import register_payment_providers


@receiver(register_payment_providers, dispatch_uid="payment_paypal")
def register_payment_provider(sender, **kwargs):
    from .payment import Paypal
    return Paypal
from django.db import models


class AdjacencyListModel(models.Model):
    title = models.CharField(max_length=100)
    parent = models.ForeignKey(
        'self',
        related_name='%(class)s_parent',
        on_delete=models.CASCADE,
        db_index=True,
        null=True,
        blank=True)

    def __str__(self):
        return 'adjacencylistmodel_%s' % self.title


class NestedSetModel(models.Model):
    title = models.CharField(max_length=100)
    lft = models.IntegerField(db_index=True)
    rgt = models.IntegerField(db_index=True)
    level = models.IntegerField(db_index=True)

    def __str__(self):
        return 'nestedsetmodel_%s' % self.title
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-04-10 03:58
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('netdevice', '0006_auto_20190409_0325'),
    ]

    operations = [
        migrations.RenameField(
            model_name='vrf',
            old_name='vrf_name',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='vrf',
            old_name='vrf_target',
            new_name='target',
        ),
    ]
from functools import reduce


class ScopedString(object):
    def __init__(self):
        self._stack = []

    def push(self, frame):
        self._stack.append(frame)

    def pop(self):
        frame = self._stack.pop()
        return frame

    def __str__(self):
        return '.'.join(self._stack)


class ScopedList(object):
    def __init__(self, stack=None):
        if stack:
            self._stack = stack
        else:
            self._stack = []
            self.push()

    def push(self):
        self._stack.append([])

    def pop(self):
        if len(self._stack) <= 1:
            raise IndexError("Attempt to pop global scope")
        self._stack.pop()

    def append(self, val):
        self._stack[-1].append(val)

    def _normalize(self):
        return reduce(lambda x, y: x + y, self._stack, [])

    def __str__(self):
        return str(self._normalize())

    def __repr__(self):
        # Fixed: this previously reported itself as "ScopedDict(...)".
        return "ScopedList(" + repr(self._stack) + ")"

    def __iter__(self):
        return self._normalize().__iter__()


class ScopedDict(object):
    def __init__(self, stack=None):
        if stack:
            self._stack = stack
        else:
            self._stack = []
            self.push()

    def push(self):
        self._stack.insert(0, {})

    def pop(self):
        if len(self._stack) <= 1:
            raise IndexError("Attempt to pop global scope")
        temp = self._stack[0]
        del self._stack[0]
        return temp

    def _normalize(self):
        normal = {}
        for frame in self._stack:
            for key, value in frame.items():
                if key not in normal:
                    normal[key] = value
        return normal

    def __getitem__(self, key):
        for frame in self._stack:
            if key in frame:
                return frame[key]
        raise KeyError(key)

    def __setitem__(self, key, value):
        self._stack[0][key] = value

    def __contains__(self, key):
        for frame in self._stack:
            if key in frame:
                return True
        return False

    def __str__(self):
        return str(self._normalize())

    def __repr__(self):
        return "ScopedDict(" + repr(self._stack) + ")"

    def __iter__(self):
        return self._normalize().__iter__()

    def items(self):
        return self._normalize().items()

    def keys(self):
        return self._normalize().keys()

    def values(self):
        return self._normalize().values()
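# Usage sketch (my addition, not part of the original module): ScopedDict
# resolves names innermost-frame first, which is the behaviour that
# push/pop/__getitem__ above implement.
if __name__ == '__main__':
    scopes = ScopedDict()
    scopes['x'] = 1          # global frame
    scopes.push()
    scopes['x'] = 2          # shadows the global binding
    assert scopes['x'] == 2
    assert scopes.pop() == {'x': 2}
    assert scopes['x'] == 1  # inner frame discarded, global visible again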
"""A likelihood function representing a Student-t distribution. Author: Ilias Bilionis Date: 1/21/2013 """ __all__ = ['StudentTLikelihoodFunction'] import numpy as np import scipy import math from . import GaussianLikelihoodFunction class StudentTLikelihoodFunction(GaussianLikelihoodFunction): """An object representing a Student-t likelihood function.""" # The degrees of freedom _nu = None @property def nu(self): """Get the degrees of freedom.""" return self._nu @nu.setter def nu(self, value): """Set the degrees of freedom.""" if not isinstance(value, float): raise TypeError('nu must be a float.') self._nu = value def __init__(self, nu, num_input=None, data=None, mean_function=None, cov=None, name='Student-t
Likelihood Function'): """Initialize the object. Arguments: nu --- The degrees of freedom of the dist
ribution. Keyword Arguments num_input --- The number of inputs. Optional, if mean_function is a proper Function. data --- The observed data. A vector. Optional, if mean_function is a proper Function. It can be set later. mean_function --- The mean function. See the super class for the description. cov --- The covariance matrix. It can either be a positive definite matrix, or a number. The data or a proper mean_funciton is preassumed. name --- A name for the likelihood function. """ self.nu = nu super(StudentTLikelihoodFunction, self).__init__(num_input=num_input, data=data, mean_function=mean_function, cov=cov, name=name) def __call__(self, x): """Evaluate the function at x.""" mu = self.mean_function(x) y = scipy.linalg.solve_triangular(self.L_cov, self.data - mu) return ( - 0.5 * (self.nu + self.num_data) * math.log(1. + np.dot(y, y) / self.nu))
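# Illustrative check (my addition, not part of the original module): the
# __call__ above returns the *unnormalized* log-density of a multivariate
# Student-t, i.e. the kernel -0.5 * (nu + N) * log(1 + y.y / nu) with
# y = L_cov^{-1} (data - mu). For a single observation with unit covariance
# it should therefore differ from scipy.stats.t.logpdf only by a constant.
if __name__ == '__main__':
    from scipy import stats

    nu, xs = 3.0, np.array([-1.0, 0.0, 2.5])
    kernel = -0.5 * (nu + 1) * np.log(1.0 + xs ** 2 / nu)
    offset = stats.t.logpdf(xs, df=nu) - kernel
    # The offset is the log normalizing constant, identical for every x.
    assert np.allclose(offset, offset[0])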
# coding: utf-8 # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- """ FILE: sample_index_crud_operations_async.py DESCRIPTION: This sample demonstrates how to get, create, update, or delete an index. USAGE: python sample_index_crud_operations_async.py Set the environment variables with your own values before running the sample: 1) AZURE_SEARCH_SERVICE_ENDPOINT - the endpoint of your Azure Cognitive Search service 2) AZURE_SEARCH_API_KEY - your search API key """ import os import asyncio service_endpoint = os.getenv("AZURE_SEARCH_SERVICE_ENDPOINT") key = os.getenv("AZURE_SEARCH_API_KEY") from azure.core.credentials import AzureKeyCredential from azure.search.documents.indexes.aio import SearchIndexClient from azure.search.documents.indexes.models import ( ComplexField, CorsOptions, SearchIndex, ScoringProfile, SearchFieldDataType, SimpleField, SearchableField ) client = SearchIndexClient(service_endpoint, AzureKeyCredential(key)) async def create_index(): # [START create_index_async] name = "hotels" fields = [ SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True), SimpleField(name="baseRate", type=SearchFieldDataType.Double), SearchableField(name="description", type=SearchFieldDataType.String, collection=True), ComplexField(name="address", fields=[ SimpleField(name="streetAddress", type=SearchFieldDataType.String), SimpleField(name="city", type=SearchFieldDataType.String), ], collection=True) ] cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) scoring_profiles = [] index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, cors_options=cors_options) result = await client.create_index(index) # [END create_index_async] async def get_index(): # [START get_index_async] name = "hotels" result = await client.get_index(name) # [END get_index_async] async def update_index(): # [START update_index_async] name = "hotels" fields = [ SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True), SimpleField(name="baseRate", type=SearchFieldDataType.Double), SearchableField(name="description", type=SearchFieldDataType.String, collection=True), SearchableField(name="hotelName", type=SearchFieldDataType.String), ComplexField(name="address", fields=[ SimpleField(name="streetAddress", type=SearchFieldDataType.String), SimpleField(name="city", type=SearchFieldDataType.String), SimpleFi
eld(name="state", type=SearchFieldDataType.String), ], collection=True) ] cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) scoring_profile = ScoringProfile( name="MyProfile" ) scoring_profiles = [] scoring_profiles.append(scoring_profile) index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, cors_options=cors_options) result = a
wait client.create_or_update_index(index=index) # [END update_index_async] async def delete_index(): # [START delete_index_async] name = "hotels" await client.delete_index(name) # [END delete_index_async] async def main(): await create_index() await get_index() await update_index() await delete_index() await client.close() if __name__ == '__main__': loop = asyncio.get_event_loop() loop.run_until_complete(main()) loop.close()
def RGB01ToHex(rgb):
    """
    Return an RGB color value as a hex color string.
    """
    return '#%02x%02x%02x' % tuple([int(x * 255) for x in rgb])


def hexToRGB01(hexColor):
    """
    Return a hex color string as an RGB tuple of floats in the range 0..1
    """
    h = hexColor.lstrip('#')
    return tuple([x / 255.0 for x in [int(h[i:i + 2], 16) for i in (0, 2, 4)]])
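# Usage sketch (my addition): round-tripping a color through the two helpers
# above. The conversion is lossy only through the int() truncation when
# mapping 0..1 floats onto 0..255 channel values.
if __name__ == '__main__':
    assert RGB01ToHex((1.0, 0.5, 0.0)) == '#ff7f00'
    assert hexToRGB01('#ff7f00') == (1.0, 127 / 255.0, 0.0)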
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a
copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from resource_management import * class ECSServiceCheck(Script): def service_check(self, env
): import params env.set_params(params) # run fs list command to make sure ECS client can talk to ECS backend list_command = format("fs -ls /") if params.security_enabled: Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"), user=params.hdfs_user ) ExecuteHadoop(list_command, user=params.hdfs_user, logoutput=True, conf_dir=params.hadoop_conf_dir, try_sleep=3, tries=20, bin_dir=params.hadoop_bin_dir ) if __name__ == "__main__": ECSServiceCheck().execute()
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dealer.git import git
from django.template import RequestContext

requestcontext = None


class MakoMiddleware(object):

    def process_request(self, request):
        global requestcontext
        requestcontext = RequestContext(request)
        requestcontext['is_secure'] = request.is_secure()
        requestcontext['site'] = request.get_host()
        requestcontext['REVISION'] = git.revision
""" Module with functionality for splitting and shuffling datasets. """ import numpy as np from sklearn.utils import murmurhash3_32 from spotlight.interactions import Interactions def _index_or_none(array, shuffle_index): if array is None: return None else: return array[shuffle_index] def shuffle_interactions(interactions, random_state=None): """ Shuffle interactions. Parameters ---------- interactions: :class:`spotlight.interactions.Interactions` The interactions to shuffle. random_state: np.random.RandomState, optional The random state used for the shuffle. Returns ------- interactions: :class:`spotlight.interactions.Interactions` The shuffled interactions. """ if random_state is None: random_state = np.random.RandomState() shuffle_indices = np.arange(len(interactions.user_ids)) random_state.shuffle(shuffle_indices) return Interactions(interactions.user_ids[shuffle_indices], interactions.item_ids[shuffle_indices], ratings=_index_or_none(interactions.ratings, shuffle_indices), timestamps=_index_or_none(interactions.timestamps, shuffle_indices), weights=_index_or_none(interactions.weights, shuffle_indices), num_users=interactions.num_users, num_items=interactions.num_items) def random_train_test_split(interactions, test_percentage=0.2, random_state=None): """ Randomly split interactions between training and testing. Parameters ---------- interactions: :class:`spotlight.interactions.Interactions` The interactions to shuffle. test_percentage: float, optional The fraction of interactions to place in the test set. random_state: np.random.RandomState, optional The random state used for the shuffle. Returns ------- (train, test): (:class:`spotlight.interactions.Interactions`, :class:`spotlight.interactions.Interactions`) A tuple of (train data, test data) """ interactions = shuffle_interactions(interactions,
random_state=random_state) cutoff = int((1.0 - test_percentage) * len(interactions)) train_idx = slice(None, cutoff) test_idx = slice(cutoff, None) train = Interaction
s(interactions.user_ids[train_idx], interactions.item_ids[train_idx], ratings=_index_or_none(interactions.ratings, train_idx), timestamps=_index_or_none(interactions.timestamps, train_idx), weights=_index_or_none(interactions.weights, train_idx), num_users=interactions.num_users, num_items=interactions.num_items) test = Interactions(interactions.user_ids[test_idx], interactions.item_ids[test_idx], ratings=_index_or_none(interactions.ratings, test_idx), timestamps=_index_or_none(interactions.timestamps, test_idx), weights=_index_or_none(interactions.weights, test_idx), num_users=interactions.num_users, num_items=interactions.num_items) return train, test def user_based_train_test_split(interactions, test_percentage=0.2, random_state=None): """ Split interactions between a train and a test set based on user ids, so that a given user's entire interaction history is either in the train, or the test set. Parameters ---------- interactions: :class:`spotlight.interactions.Interactions` The interactions to shuffle. test_percentage: float, optional The fraction of users to place in the test set. random_state: np.random.RandomState, optional The random state used for the shuffle. Returns ------- (train, test): (:class:`spotlight.interactions.Interactions`, :class:`spotlight.interactions.Interactions`) A tuple of (train data, test data) """ if random_state is None: random_state = np.random.RandomState() minint = np.iinfo(np.uint32).min maxint = np.iinfo(np.uint32).max seed = random_state.randint(minint, maxint, dtype=np.int64) in_test = ((murmurhash3_32(interactions.user_ids, seed=seed, positive=True) % 100 / 100.0) < test_percentage) in_train = np.logical_not(in_test) train = Interactions(interactions.user_ids[in_train], interactions.item_ids[in_train], ratings=_index_or_none(interactions.ratings, in_train), timestamps=_index_or_none(interactions.timestamps, in_train), weights=_index_or_none(interactions.weights, in_train), num_users=interactions.num_users, num_items=interactions.num_items) test = Interactions(interactions.user_ids[in_test], interactions.item_ids[in_test], ratings=_index_or_none(interactions.ratings, in_test), timestamps=_index_or_none(interactions.timestamps, in_test), weights=_index_or_none(interactions.weights, in_test), num_users=interactions.num_users, num_items=interactions.num_items) return train, test
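# Usage sketch (my addition, assuming the spotlight package is available at
# runtime): the two strategies above make different guarantees. The random
# split scatters a user's interactions across both sides, while the
# user-based split keeps each user's whole history on one side, which is the
# right setup for evaluating performance on unseen users.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    interactions = Interactions(
        rng.randint(0, 100, size=1000, dtype=np.int32),
        rng.randint(0, 500, size=1000, dtype=np.int32))

    train, test = random_train_test_split(interactions, test_percentage=0.2)
    assert len(train) + len(test) == len(interactions)

    utrain, utest = user_based_train_test_split(interactions)
    assert not set(utrain.user_ids) & set(utest.user_ids)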
, {'primary_key': 'True'}), 'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}), 'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Party']"}) }, 'meps.delegation': { 'Meta': {'object_name': 'Delegation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'meps.delegationrole': { 'Meta': {'object_name': 'DelegationRole'}, 'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}), 'delegation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Delegation']"}), 'end': ('django.db.models.fields.DateField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}), 'role': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'meps.group': { 'Meta': {'object_name': 'Group'}, 'abbreviation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}) }, 'meps.groupmep': { 'Meta': {'object_name': 'GroupMEP'}, 'begin': ('django.db.models.fields.DateField', [], {'null': 'True'}), 'end': ('django.db.models.fields.DateField', [], {'null': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}), 'role': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'meps.mep': { 'Meta': {'ordering': "['last_name']", 'object_name': 'MEP', '_ormbases': ['reps.Representative']}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'bxl_building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bxl_building'", 'to': "orm['meps.Building']"}), 'bxl_fax': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'bxl_floor': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'bxl_office_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'bxl_phone1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'bxl_phone2': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'committees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Committee']", 'through': "orm['meps.CommitteeRole']", 'symmetrical': 'False'}), 'countries': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Country']", 'through': "orm['meps.CountryMEP']", 'symmetrical': 'False'}), 'delegations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Delegation']", 'through': "orm['meps.DelegationRole']", 'symmetrical': 'False'}), 'ep_debates': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'ep_declarations': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'ep_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}), 'ep_motions': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'ep_opinions': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'ep_questions': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'ep_reports': 
('django.db.models.fields.URLField', [], {'max_length': '200'}), 'ep_webpage': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Group']", 'through': "orm['meps.GroupMEP']", 'symmetrical': 'False'}), 'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['meps.Organization']", 'through': "orm['meps.OrganizationMEP']", 'symmetrical': 'False'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}), 'representative_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['reps.Representative']", 'unique': 'True', 'primary_key': 'True'}), 'stg_building': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stg_building'", 'to': "orm['meps.Building']"}), 'stg_fax': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'stg_floor': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'stg_office_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'stg_phone1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'stg_phone2': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'total_score': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True'}) }, 'meps.organization': { 'Meta': {'object_name': 'Organization'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'meps.organizationmep': { 'Meta': {'object_name': 'OrganizationMEP'}, 'begin': ('django.db.models.fields.DateField', [], {}), 'end': ('django.db.models.fields.DateField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.Organization']"}), 'role': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'meps.postaladdress': { 'Meta': {'object_name': 'PostalAddress'}, 'addr': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mep': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['meps.MEP']"}) }, 'reps.opinion': { 'Meta': {'object_name': 'Opinion'}, 'content': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1023'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) }, 'reps.opinionrep': { 'Meta': {'object_name': 'OpinionREP'}, 'date': ('django.db.models.fields.DateField', [], {}), 'id': ('django.db.model
s.fields.AutoField', [], {'primary_key': 'True'}), 'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Opinion']"}), 'representative': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reps.Representative']"}) }, 'reps.party': { 'Meta': {'object_name': 'Party'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'
unique': 'True', 'max_length': '255'}) }, 'reps.partyrepresentative': { 'Meta': {'object_name': 'PartyRepresentative'}, 'current': (
# importing libraries: import maya.cmds as cmds import maya.mel as mel # global variables to this module: CLASS_NAME = "Arm" TITLE = "m028_arm" DESCRIPTION = "m029_armDesc" ICON = "/Icons/dp_arm.png" def Arm(dpAutoRigInst): """ This function will create all guides needed to compose an arm. """ # check modules integrity: guideDir = 'Modules' checkModuleList = ['dpLimb', 'dpFinger'] checkResultList = dpAutoRigInst.startGuideModules(guideDir, "check", None, checkModuleList=checkModuleList) if len(checkResultList) == 0: # creating module instances: armLimbInstance = dpAutoRigInst.initGuide('dpLimb', guideDir) # change name to arm: dpAutoRigInst.guide.Limb.editUserName(armLimbInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m028_arm'].capitalize()) # create finger instances: indexFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir) dpAutoRigInst.guide.Finger.editUserName(indexFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m032_index']) middleFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir) dpAutoRigInst.guide.Finger.editUserName(middleFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m033_middle']) ringFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir) dpAutoRigInst.guide.Finger.editUserName(ringFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m034_ring']) pinkFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir) dpAutoRigInst.guide.Finger.editUserName(pinkFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m035_pink']) thumbFingerInstance = dpAutoRigInst.initGuide('dpFinger', guideDir) dpAutoRigInst.guide.Finger.editUserName(thumbFingerInstance, checkText=dpAutoRigInst.langDic[dpAutoRigInst.langName]['m036_thumb']) # edit arm limb guide: armBaseGuide = armLimbInstance.moduleGrp cmds.setAttr(armBaseGuide+".translateX", 2.5) cmds.setAttr(armBaseGuide+".translateY", 16) cmds.setAttr(armBaseGuide+".displayAnnotation", 0) cmds.setAttr(armLimbInstance.cvExtremLoc+".translateZ", 7) cmds.setAttr(armLimbInstance.radiusCtrl+".translateX", 1.5) # edit finger guides: fingerInstanceList = [indexFingerInstance, middleFingerInstance, ringFingerInstance, pinkFingerInstance, thumbFingerInstance] fingerTZList = [0.6, 0.2, -0.2, -0.6, 0.72] for n, fingerInstance in enumerate(fingerInstanceList): cmds.setAttr(fingerInstance.moduleGrp+".translateX", 11) cmds.setAttr(fingerInstance.moduleGrp+".translateY", 16) cmds.setAttr(fingerInstance.moduleGrp+".translateZ", fingerTZList[n]) cmds.setAttr(fingerInstance.moduleGrp+".displayAnnotation", 0) cmds.setAttr(fingerInstance.radiusCtrl+".translateX", 0.3) cmds.setAttr(fingerInstance.annotation+".visibility", 0) if n == len(fingerInstanceList)-1: # correct not commun values for thumb guide: cmds.setAttr(thumbFingerInstance.moduleGrp+".translateX", 10.1) cmds.setAttr(thumbFingerInstance.moduleGrp+".rotateX", 60) dpAutoRigInst.guide.Finger.changeJointNumber(thumbFingerInstance, 2) cmds.setAttr(thumbFingerInstance.moduleGrp+".nJoints", 2) # parent finger guide to the arm wrist guide: cmds.parent(fingerInstance.moduleGrp, armLimbInstance.cvExtremLoc, absolute=True) # select the armGuide_Base: cmds.select(armBaseGuide) else: # error checking modules in the folder: mel.eval('error \"'+ dpAutoRigInst.langDic[dpAutoRigInst.
langName]['e001_GuideNotChecked'] +' - '+ (", ").join(checkResultList) +'\";')
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-28 15:26
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('genevieve_client', '0003_variant_myvariant_dbsnp'),
    ]

    operations = [
        migrations.CreateModel(
            name='OpenHumansUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('access_token', models.CharField(blank=True, max_length=30)),
                ('refresh_token', models.CharField(blank=True, max_length=30)),
                ('token_expiration', models.DateTimeField(null=True)),
                ('connected_id', models.CharField(max_length=30, unique=True)),
                ('openhumans_username', models.CharField(blank=True, max_length=30)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,
                                              to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='gennoteseditor',
            name='gennotes_id',
        ),
        migrations.RemoveField(
            model_name='gennoteseditor',
            name='genome_storage_enabled',
        ),
        migrations.AddField(
            model_name='gennoteseditor',
            name='connected_id',
            field=models.CharField(default=0, max_length=30, unique=True),
            preserve_default=False,
        ),
    ]
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
import numpy as np
from distribution import *
import operator as o
from utils.lib import gt, lt, gte, lte, neq, eq

__author__ = "bigfatnoob"


def sample(values, size=100):
    return np.random.choice(values, size=size)


def expected_value(values, size=1000):
    means = []
    for _ in range(1000):
        samples = sample(values, int(size))
        means.append(np.mean(samples))
    return np.mean(means)


def standard_deviation(values):
    return np.std(values)


def percentile(values, percent):
    return np.percentile(values, percent)


def probability(values):
    return sum([1 if v >= 1 else 0 for v in values]) / len(values)


def lambda_ev(*args):
    return lambda x: expected_value(x, *args)


def lambda_std():
    return lambda x: standard_deviation(x)


def lambda_percentile(*args):
    return lambda x: percentile(x, *args)


def lambda_probability():
    return lambda x: probability(x)


def to_int(func):
    return lambda a, b: 1 if func(a, b) else 0


evaluations = {
    "EV": lambda_ev,
    "STD": lambda_std,
    "PERCENTILE": lambda_percentile,
    "PROBABILITY": lambda_probability
}

distributions = {
    "constant": Constant,
    "normal": Normal,
    "normalCI": NormalCI,
    "uniform": Uniform,
    "random": Random,
    "exp": Exponential,
    "binomial": Binomial,
    "geometric": Geometric,
    "triangular": Triangular
}

operations = {
    "+": o.add,
    "-": o.sub,
    "*": o.mul,
    "/": o.div,
    "|": max,
    "&": o.mul,
    ">": to_int(gt),
    "<": to_int(lt),
    ">=": to_int(gte),
    "<=": to_int(lte),
    "==": to_int(eq),
    "!=": to_int(neq)
}
#!/usr/bin/env python
"""Distutils setup file, used to install or test 'setuptools'"""

import textwrap
import sys

try:
    import setuptools
except ImportError:
    sys.stderr.write("Distribute 0.7 may only upgrade an existing "
                     "Distribute 0.6 installation")
    raise SystemExit(1)

long_description = textwrap.dedent("""
    Distribute - legacy package

    This package is a simple compatibility layer that installs
    Setuptools 0.7+.
    """).lstrip()

setup_params = dict(
    name="distribute",
    version='0.7.3',
    description="distribute legacy wrapper",
    author="The fellowship of the packaging",
    author_email="[email protected]",
    license="PSF or ZPL",
    long_description=long_description,
    keywords="CPAN PyPI distutils eggs package management",
    url="http://packages.python.org/distribute",
    zip_safe=True,
    classifiers=textwrap.dedent("""
        Development Status :: 5 - Production/Stable
        Intended Audience :: Developers
        License :: OSI Approved :: Python Software Foundation License
        License :: OSI Approved :: Zope Public License
        Operating System :: OS Independent
        Programming Language :: Python :: 2.4
        Programming Language :: Python :: 2.5
        Programming Language :: Python :: 2.6
        Programming Language :: Python :: 2.7
        Programming Language :: Python :: 3
        Programming Language :: Python :: 3.1
        Programming Language :: Python :: 3.2
        Programming Language :: Python :: 3.3
        Topic :: Software Development :: Libraries :: Python Modules
        Topic :: System :: Archiving :: Packaging
        Topic :: System :: Systems Administration
        Topic :: Utilities
        """).strip().splitlines(),
    install_requires=[
        'setuptools>=0.7',
    ],
)

if __name__ == '__main__':
    setuptools.setup(**setup_params)
from django.core.management.base import BaseCommand, CommandError

from tweets.tasks import stream


# The class must be named Command, and subclass BaseCommand
class Command(BaseCommand):
    # Show this when the user types help
    help = "My twitter stream command"

    # A command must define handle()
    def handle(self, *args, **options):
        stream()
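# Usage note (my addition): Django derives the management command's name
# from this module's filename, so if the file lived at, e.g.,
# tweets/management/commands/stream_tweets.py (hypothetical path), it would
# be invoked as:
#
#     python manage.py stream_tweets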
#!/usr/bin/env python
# coding=utf-8
"""
Created on April 15 2017

@author: yytang
"""
import re

from scrapy import Selector

from libs.misc import get_spider_name_from_domain
from libs.polish import polish_title, polish_subtitle, polish_content
from novelsCrawler.spiders.novelSpider import NovelSpider


class PiaotianSpider(NovelSpider):
    """
    classdocs

    example: https://www.piaotian.com/html/9/9459/index.html
    """

    allowed_domains = ['www.piaotian.com']
    name = get_spider_name_from_domain(allowed_domains[0])

    # custom_settings = {
    #     'DOWNLOAD_DELAY': 0.3,
    # }

    def parse_title(self, response):
        sel = Selector(response)
        title = sel.xpath('//h1/text()').extract()[0]
        title = polish_title(title, self.name)
        return title

    def parse_episodes(self, response):
        sel = Selector(response)
        episodes = []
        subtitle_selectors = sel.xpath('//div[@class="centent"]/ul/li/a')
        for page_id, subtitle_selector in enumerate(subtitle_selectors):
            subtitle_url = subtitle_selector.xpath('@href').extract()[0]
            subtitle_url = response.urljoin(subtitle_url.strip())
            subtitle_name = subtitle_selector.xpath('text()').extract()[0]
            subtitle_name = polish_subtitle(subtitle_name)
            episodes.append((page_id, subtitle_name, subtitle_url))
        return episodes

    def parse_content(self, response):
        # sel = Selector(response)
        # content = sel.xpath('//div[@id="content"]/p/text()').extract()
        # content = polish_content(content)
        html = str(response.body.decode('GBK'))
        pattern = r'&nbsp;&nbsp;&nbsp;&nbsp;(.*)'
        m = re.search(pattern, html)
        if m:
            content = m.group(1)
        else:
            content = ''
        content = content.replace('<br /><br />&nbsp;&nbsp;&nbsp;&nbsp;', '\n\n')
        return content
a, b = <warning descr="Need more val
ues to unpack">None
</warning>
""" @author: dhoomake
thu """ from __future__ import absolute_import
, unicode_literals
#!/usr/bin/env python3
from setuptools import setup, Extension

# #CXX_FLAGS = "-O3 -std=gnu++11 -Wall -Wno-comment"
#
# ## List of C/C++ sources that will conform the library
# #sources = [
# #    "andrnx/clib/android.c",
# #]

setup(name="andrnx",
      version="0.1",
      description="Package to convert from GNSS logger to Rinex files",
      author='Miquel Garcia',
      author_email='[email protected]',
      url='https://www.rokubun.cat',
      packages=['andrnx'],
      test_suite="andrnx.test",
      scripts=['bin/gnsslogger_to_rnx'])
class DrawingDimensioningWorkbench (Workbench): # Icon generated using by converting linearDimension.svg to xpm format using Gimp Icon = ''' /* XPM */ static char * linearDimension_xpm[] = { "32 32 10 1", " c None", ". c #000000", "+ c #0008FF", "@ c #0009FF", "# c #000AFF", "$ c #00023D", "% c #0008F7", "& c #0008EE", "* c #000587", "= c #000001", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". +@@ + .", ". @+@@+ +@@+@ .", ". +@+@@@@@@ @@@@@@@# .", "$%@@@@@@@@@+@@@@@@@@@@@@@@@@@@&$", ". #@@@@@@@@ #+@@@@@@@@*=", ". @+@@+ +@@@@@ .", ". +@ #@++ .", ". # .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". .", ". ."}; ''' MenuText = 'Drawing Dimensioning' def Initialize(self): import importlib, os from dimensioning import __dir__, debugPrint, iconPath import linearDimension import linearDimension_stack import deleteDimension import circularDimension import grabPointAdd import textAdd import textEdit import textMove import escapeDimensioning import angularDimension import radiusDimension import centerLines import noteCircle import toleranceAdd commandslist = [ 'dd_linearDimension', #where dd is short-hand for drawing dimensioning 'dd_linearDimensionStack', 'dd_circularDimension', 'dd_radiusDimension', 'dd_angularDimension', 'dd_centerLines', 'dd_centerLine', 'dd_noteCircle', 'dd_grabPoint', 'dd_addText', 'dd_editText', 'dd_moveText', 'dd_addTolerance', 'dd_deleteDimension', 'dd_escapeDimensioning', ] self.appendToolbar('Drawing Dimensioning', commandslist) import unfold import unfold_bending_note import unfold_export_to_dxf unfold_cmds = [ 'dd_unfold', 'dd_bendingNote', ] if hasattr(os,'uname') and os.uname()[0] == 'Linux' : #this command only works on Linux systems unfold_cmds.append('dd_exportToDxf') self.appendToolbar( 'Drawing Dimensioning Folding', unfold_cmds ) import weldingSymbols if int( FreeCAD.Version()[1] > 15 ) and int( FreeCAD.Version()[2].split()[0] ) > 5165: weldingCommandList = ['dd_weldingGroupCommand'] else:
weldingCommandList = weldingSymbols.weldingCmds self.appendToolbar('Drawing Dimensioning Welding Symbols', weldingCommandList)
self.appendToolbar('Drawing Dimensioning Help', [ 'dd_help' ]) FreeCADGui.addIconPath(iconPath) FreeCADGui.addPreferencePage( os.path.join( __dir__, 'Resources', 'ui', 'drawing_dimensioing_prefs-base.ui'),'Drawing Dimensioning' ) Gui.addWorkbench(DrawingDimensioningWorkbench())
# Copyright (c) 2009-2010 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:[email protected] # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distr
ibuted in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA. from tortoisehg.util import hglib, patchctx from tortoisehg.hgqt.qtlib import geticon, getoverlaidicon from PyQt4.QtCore import * from PyQt4.QtGui import * nullvariant = QVariant() def getSubrepoIcoDict(): 'Return a dictionary mapping each subrepo type to the corresponding icon' _subrepoType2IcoMap = { 'hg': 'hg', 'git': 'thg-git-subrepo', 'svn': 'thg-svn-subrepo', 'hgsubversion': 'thg-svn-subrepo', 'empty': 'hg' } icOverlay = geticon('thg-subrepo') subrepoIcoDict = {} for stype in _subrepoType2IcoMap: ic = geticon(_subrepoType2IcoMap[stype]) ic = getoverlaidicon(ic, icOverlay) subrepoIcoDict[stype] = ic return subrepoIcoDict class HgFileListModel(QAbstractTableModel): """ Model used for listing (modified) files of a given Hg revision """ showMessage = pyqtSignal(QString) def __init__(self, parent): QAbstractTableModel.__init__(self, parent) self._boldfont = parent.font() self._boldfont.setBold(True) self._ctx = None self._files = [] self._filesdict = {} self._fulllist = False self._subrepoIcoDict = getSubrepoIcoDict() @pyqtSlot(bool) def toggleFullFileList(self, value): self._fulllist = value self.loadFiles() self.layoutChanged.emit() def __len__(self): return len(self._files) def rowCount(self, parent=None): return len(self) def columnCount(self, parent=None): return 1 def file(self, row): return self._files[row]['path'] def setContext(self, ctx): reload = False if not self._ctx: reload = True elif self._ctx.rev() is None: reload = True elif ctx.thgid() != self._ctx.thgid(): reload = True if reload: self._ctx = ctx self.loadFiles() self.layoutChanged.emit() def fileFromIndex(self, index): if not index.isValid() or index.row()>=len(self) or not self._ctx: return None row = index.row() return self._files[row]['path'] def dataFromIndex(self, index): if not index.isValid() or index.row()>=len(self) or not self._ctx: return None row = index.row() return self._files[row] def indexFromFile(self, filename): if filename in self._filesdict: row = self._files.index(self._filesdict[filename]) return self.index(row, 0) return QModelIndex() def _buildDesc(self, parent): files = [] ctxfiles = self._ctx.files() modified, added, removed = self._ctx.changesToParent(parent) ismerge = bool(self._ctx.p2()) # Add the list of modified subrepos to the top of the list if not isinstance(self._ctx, patchctx.patchctx): if ".hgsubstate" in ctxfiles or ".hgsub" in ctxfiles: from mercurial import subrepo # Add the list of modified subrepos for s, sd in self._ctx.substate.items(): srev = self._ctx.substate.get(s, subrepo.nullstate)[1] stype = self._ctx.substate.get(s, subrepo.nullstate)[2] sp1rev = self._ctx.p1().substate.get(s, subrepo.nullstate)[1] sp2rev = '' if ismerge: sp2rev = self._ctx.p2().substate.get(s, subrepo.nullstate)[1] if srev != sp1rev or (sp2rev != '' and srev != sp2rev): wasmerged = ismerge and s in ctxfiles files.append({'path': s, 'status': 'S', 'parent': parent, 'wasmerged': wasmerged, 'stype': stype}) # Add the list of missing subrepos subreposet = set(self._ctx.substate.keys()) subrepoparent1set = set(self._ctx.p1().substate.keys()) missingsubreposet = subrepoparent1set.difference(subreposet) for s in missingsubreposet: wasmerged = ismerge and s in ctxfiles stype = self._ctx.p1().substate.get(s, subrepo.nullstate)[2] files.append({'path': s, 'status': 'S', 'parent': parent, 'wasmerged': wasmerged, 'stype': stype}) if self._fulllist and ismerge: func = lambda x: True else: func = lambda x: x in ctxfiles for lst, flag in ((added, 'A'), (modified, 'M'), (removed, 'R')): 
for f in filter(func, lst): wasmerged = ismerge and f in ctxfiles f = self._ctx.removeStandin(f) files.append({'path': f, 'status': flag, 'parent': parent, 'wasmerged': wasmerged}) return files def loadFiles(self): self._files = [] try: self._files = self._buildDesc(0) if bool(self._ctx.p2()): _paths = [x['path'] for x in self._files] _files = self._buildDesc(1) self._files += [x for x in _files if x['path'] not in _paths] except EnvironmentError, e: self.showMessage.emit(hglib.tounicode(str(e))) self._filesdict = dict([(f['path'], f) for f in self._files]) def data(self, index, role): if not index.isValid() or index.row()>len(self) or not self._ctx: return nullvariant if index.column() != 0: return nullvariant row = index.row() column = index.column() current_file_desc = self._files[row] current_file = current_file_desc['path'] if role in (Qt.DisplayRole, Qt.ToolTipRole): return QVariant(hglib.tounicode(current_file)) elif role == Qt.DecorationRole: if self._fulllist and bool(self._ctx.p2()): if current_file_desc['wasmerged']: icn = geticon('thg-file-merged') elif current_file_desc['parent'] == 0: icn = geticon('thg-file-p0') elif current_file_desc['parent'] == 1: icn = geticon('thg-file-p1') return QVariant(icn.pixmap(20,20)) elif current_file_desc['status'] == 'A': return QVariant(geticon('fileadd')) elif current_file_desc['status'] == 'R': return QVariant(geticon('filedelete')) elif current_file_desc['status'] == 'S': stype = current_file_desc.get('stype', 'hg') return QVariant(self._subrepoIcoDict[stype]) #else: # return QVariant(geticon('filemodify')) elif role == Qt.FontRole: if current_file_desc['wasmerged']: return QVariant(self._boldfont) else: return nullvariant
"""Maya initialisation for Mindbender pipeline""" from maya import cmds def setup(): assert __import__("pyblish_maya").is_setup(), ( "pyblish-mindbender dep
ends on pyblish_maya which has not " "yet
been setup. Run pyblish_maya.setup()") from pyblish import api api.register_gui("pyblish_lite") from mindbender import api, maya api.install(maya) # Allow time for dependencies (e.g. pyblish-maya) # to be installed first. cmds.evalDeferred(setup)
""":mod:`kinsumer.checkpointer` --- Persisting positions for Kinesis shards ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ import abc import json import os.path from typing import Optional, Dict class Checkpointer(abc.ABC, object): """Checkpointer is the interface for persisting positions for Kinesis shards """ @abc.abstractmethod def get_checkpoints(self) -> Dict[str, str]: """Get a dictionary whose keys are all the shard ids we are aware of, and whose values are the sequence id
of the last record processed for its shard """ @abc.abstractmethod def checkpoint(self, shard_id: str, sequence: str) -> None: """Persist the sequence number for a given shard""" @abc.abstractmethod def get_checkpoint(self, shard_id: str) -> Optional[str]: "
""Get the sequence number of the last successfully processed record""" class InMemoryCheckpointer(Checkpointer): def __init__(self) -> None: self._checkpoints = {} def get_checkpoints(self) -> Dict[str, str]: return self._checkpoints.copy() def checkpoint(self, shard_id: str, sequence: str) -> None: self._checkpoints[shard_id] = sequence def get_checkpoint(self, shard_id: str) -> Optional[str]: return self._checkpoints.get(shard_id) class FileCheckpointer(InMemoryCheckpointer): def __init__(self, file: str) -> None: super().__init__() self.file = os.path.expanduser(file) if os.path.exists(self.file): with open(self.file, 'rb') as f: self._checkpoints = json.load(f) def checkpoint(self, shard_id: str, sequence: str) -> None: super().checkpoint(shard_id, sequence) with open(self.file, 'wb') as f: f.write(json.dumps(self._checkpoints, ensure_ascii=False).encode())
class ParametrizedError(Exception):
    def __init__(self, problem, invalid):
        self.problem = str(problem)
        self.invalid = str(invalid)

    def __str__(self):
        # __str__ must return the message, not print it.
        return '--- Error: {0}\n--- Caused by: {1}'.format(self.problem,
                                                           self.invalid)


class InvalidToken(ParametrizedError):
    pass


class ToneError(ParametrizedError):
    pass


class IntervalError(ParametrizedError):
    pass


class TriolaError(ParametrizedError):
    pass


class ConfigError(ParametrizedError):
    pass


class ComposingError(ParametrizedError):
    pass
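# Usage sketch (my addition): with __str__ returning the message instead of
# printing it, the formatted details appear wherever the exception is
# rendered, e.g. in tracebacks and logs.
if __name__ == '__main__':
    try:
        raise ConfigError('unknown key', 'tempo=fast')
    except ConfigError as e:
        assert str(e) == '--- Error: unknown key\n--- Caused by: tempo=fast'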
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


class TestFailoverStatus(object):
    def test_get_status(self, request, mgmt_root):
        failover_status = mgmt_root.tm.cm.failover_status
        assert failover_status._meta_data['uri'].endswith(
            "/mgmt/tm/cm/failover-status/")
        failover_status.refresh()
        des = \
            (failover_status.entries['https://localhost/mgmt/tm/cm/failover-status/0']
             ['nestedStats']
             ['entries']
             ['status']
             ['description'])
        assert des == "ACTIVE"
# third party
import numpy as np
import pytest

# syft absolute
from syft.core.tensor.smpc.share_tensor import ShareTensor


@pytest.mark.smpc
def test_bit_extraction() -> None:
    share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
    data = np.array([[21, 32], [-54, 89]], dtype=np.int32)
    share.child = data

    exp_res1 = np.array([[False, False], [True, False]], dtype=np.bool_)
    res = share.bit_extraction(31).child
    assert (res == exp_res1).all()

    exp_res2 = np.array([[True, False], [False, False]], dtype=np.bool_)
    res = share.bit_extraction(2).child
    assert (res == exp_res2).all()


@pytest.mark.smpc
def test_bit_extraction_exception() -> None:
    share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
    data = np.array([[21, 32], [-54, 89]], dtype=np.int32)
    share.child = data

    with pytest.raises(Exception):
        share >> 33
    with pytest.raises(Exception):
        share >> -1
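# Note (my addition): bit 31 of a 32-bit two's-complement integer is the
# sign bit, which is why exp_res1 flags only the -54 entry; bit 2 (value 4)
# is set for 21 (0b10101) but not for 32, -54, or 89, matching exp_res2.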
/Code/AltAnalyze/AltDatabase/EnsMart72/ensembl/Mm/Mm_Ensembl_exon.txt' reference_rows=0 if '.gtf' in refExonCoordinateFile: firstLine = False else: firstLine = True for line in open(refExonCoordinateFile,'rU').xreadlines(): if firstLine: firstLine=False else: line = line.rstrip('\n') reference_rows+=1 t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions' geneID, exon, chr, strand, start, stop = t[:6] if chr_status == False: chr = string.replace(chr,'chr','') o.write(string.join([chr,start,stop,geneID+':'+exon,'',strand],'\t')+'\n') start = int(start); stop = int(stop) #geneID = string.split(exon,':')[0] splicesite_db[chr,start]=geneID splicesite_db[chr,stop]=geneID if 'I' in exon: try: introns[geneID].append([start,stop]) except Exception: introns[geneID] = [[start,stop]] files = getFiles(directory) for file in files: firstLine=True if 'junction' in file and '.bed' in file: for line in open(directory+'/'+file,'rU').xreadlines(): if firstLine: firstLine=False else: line = line.rstrip('\n') t = string.split(line,'\t'); #'12', '6998470', '6998522', 'ENSG00000111671:E1.1_ENSE00001754003', '0', '-' chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len) exon1_start = int(exon1_start); exon2_stop = int(exon2_stop) if strand == '-': exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1 ### Exons have the opposite order a = exon1_start,exon1_stop; b = exon2_start,exon2_stop exon1_stop,exon1_start = b; exon2_stop,exon2_start = a else: exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1 seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance key = chr,exon1_stop,exon2_start if (chr,exon1_stop) not in splicesite_db: ### record the splice site and position of the max read if (chr,exon2_start) in splicesite_db: ### only include splice sites where one site is known geneID = splicesite_db[(chr,exon2_start)] novel_db[chr,exon1_stop,strand] = exon1_start,geneID,5 real_splicesites[chr,exon2_start]=None elif (chr,exon2_start) not in splicesite_db: ### record the splice site and position of the max read if (chr,exon1_stop) in splicesite_db: ### only include splice sites where one site is known #if 121652702 ==exon2_start: #print chr, exon1_start,exon1_stop,exon2_start,exon2_stop, strand;sys.exit() geneID = splicesite_db[(chr,exon1_stop)] novel_db[chr,exon2_start,strand] = exon2_stop,geneID,3 real_splicesites[chr,exon1_stop]=None else: real_splicesites[chr,exon1_stop]=None real_splicesites[chr,exon2_start]=None print len(novel_db), 'novel splice sites and', len(real_splicesites), 'known splice sites.' 
gene_organized={} for (chr,pos1,strand) in novel_db: pos2,geneID,type = novel_db[(chr,pos1,strand)] try: gene_organized[chr,geneID,strand].append([pos1,pos2,type]) except Exception: gene_organized[chr,geneID,strand] = [[pos1,pos2,type]] def intronCheck(geneID,coords): ### see if the coordinates are within a given intron try: for ic in introns[geneID]: if withinQuery(ic,coords): return True except Exception: pass def withinQuery(ls1,ls2): imax = max(ls1) imin = min(ls1) qmax = max(ls2) qmin = min(ls2) if qmin >= imin and qmax <= imax: return True else: return False ### Compare the novel splice site locations in each gene added=[] for (chr,geneID,strand) in gene_organized: gene_organized[(chr,geneID,strand)].sort() if strand == '-': gene_organized[(chr,geneID,strand)].reverse() i=0 set = gene_organized[(chr,geneID,strand)] for (pos1,pos2,type) in set: k = [pos1,pos2] annotation='novel' if i==0 and type == 3: if len(set)>1: if set[i+1][-1]==5: l = [set[i+1][0],pos1] if (max(l)-min(l))<300 and intronCheck(geneID,l): k=l #print chr,k annotation='novel-paired' elif type == 5: if set[i-1][-1]==3: l = [set[i-1][0],pos1] if (max(l)-min(l))<300 and intronCheck(geneID,l): k=l #print chr,k annotation='novel-paired' k.sort(); i+=1 if k not in added: values = string.join([chr,str(k[0]),str(k[1]),geneID+':'+annotation,'',strand],'\t')+'\n' added.append(k) o.write(values) o.close() if __name__ == '__main__': import multiprocessing as mlp refExonCoordinateFile = '' outputExonCoordinateRefBEDfile = '' #bam_dir = "H9.102.2.6.bam" #outputExonCoordinateRefBEDfile = 'H9.102.2.6__exon.bed' ################ Comand-line arguments ################ if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments print "Warning! Please designate a directory containing BAM files as input in the command-line" print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --g /Users/me/ReferenceExonCoordinates/Hs_Ensembl_exon_hg19.txt --r /Users/me/ExonBEDRef/Hs_Ensembl_exon-cancer_hg19.bed --a exon --a junction --a reference" print "Example: python multiBAMtoBED.py --i /Users/me/BAMfiles --a junction" sys.exit() else: analysisType = [] useMultiProcessing=False options, remainder = getopt.getopt(sys.argv[1:],'', ['i=','g=','r=','a=','m=']) for opt, arg in options: if opt == '--i': bam_dir=arg elif opt == '--g': refExonCoordinateFile=arg elif opt == '--r': outputExonCoordinateRefBEDfile=arg elif opt == '--a': analysisType.append(arg) ### options are: all, junction, exon, reference
elif opt == '--m': ### Run each BAM file on a different processor if arg == 'yes': useMultiProcessing=True elif arg == 'True': useMultiProce
ssing=True else: useMultiProcessing=False else: print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit() if len(analysisType) == 0: analysisType = ['exon','junction','reference'] try: refExonCoordinateFile = refExonCoordinateFile outputExonCoordinateRefBEDfile = outputExonCoordinateRefBEDfile except Exception: print 'Please provide a exon coordinate text file using the option --g and a output coordinate file path (--r) to generate exon.bed files' analysisType = ['junction'] refExonCoordinateFile = '' outputExonCoordinateRefBEDfile
#!/usr/bin/env python3
from django.shortcuts import render

# Create your views here.
from CnbetaApis.datas.Models import *
from CnbetaApis.datas.get_letv_json import get_letv_json
from CnbetaApis.datas.get_youku_json import get_youku_json
from django.views.decorators.csrf import csrf_exempt
from django.http import *
from datetime import timezone, timedelta
import json


def getrelate(ids, session):
    relateds = session.query(Article).filter(Article.id.in_(ids))
    relateds_arr = []
    for related in relateds:
        relateds_arr.append({
            'id': related.id,
            'title': related.title,
            'url': related.url,
        })
    return relateds_arr


def get_home_data(request):
    if not request.method == 'GET':
        # HttpResponseNotAllowed is a response, not an exception, so it must
        # be returned rather than raised.
        return HttpResponseNotAllowed(['GET'])
    lastID = request.GET.get('lastid')
    limit = request.GET.get('limit') or 20
    session = DBSession()
    datas = None
    if lastID:
        datas = (session.query(Article)
                 .order_by(desc(Article.id))
                 .filter(and_(Article.introduction != None,
                              Article.id < lastID))
                 .limit(limit).all())
    else:
        datas = session.query(Article).order_by(desc(Article.id)).limit(limit).all()
    values = []
    for data in datas:
        values.append({
            'id': data.id,
            'title': data.title,
            'url': data.url,
            'source': data.source,
            'imgUrl': data.imgUrl,
            'introduction': data.introduction,
            'createTime': data.createTime.replace(
                tzinfo=timezone(timedelta(hours=8))
            ).astimezone(timezone.utc).timestamp(),
            'related': getrelate(data.related.split(','), session),
            'readCount': data.readCount,
            'opinionCount': data.opinionCount,
        })
    session.close()
    return JsonResponse({"result": values})


def get_article_content(request):
    if not request.method == 'GET':
        return HttpResponseNotAllowed(['GET'])
    article_id = request.GET.get('id')
    session = DBSession()
    datas = session.query(Article).filter(Article.id == article_id).all()
    if not len(datas):
        raise Http404('Article not exist')
    data = datas[0]
    result = {'result': {
        'id': data.id,
        'title': data.title,
        'url': data.url,
        'imgUrl': data.imgUrl,
        'source': data.source,
        'introduction': data.introduction,
        'createTime': data.createTime.replace(
            tzinfo=timezone(timedelta(hours=8))
        ).astimezone(timezone.utc).timestamp(),
        'related': getrelate(data.related.split(','), session),
        'readCount': data.readCount,
        'opinionCount': data.opinionCount,
        'content': json.loads(data.content),
    }}
    session.close()
    return JsonResponse(result)


@csrf_exempt
def get_video_realUrl(req):
    if not req.method == 'POST':
        return HttpResponseNotAllowed(['POST'])
    source_url = req.POST.get('url')
    source_type = req.POST.get('type')
    if source_type == "youku":
        source_url = get_youku_json(source_url)
    elif source_type == "letv":
        source_url = get_letv_json(source_url)
    else:
        raise Http404('Article not exist')
    return JsonResponse({"result": source_url})
ET, CONF_DEVICE_ID, CONF_ENTITIES, CONF_NAME, CONF_TRACK, DEVICE_SCHEMA, SERVICE_SCAN_CALENDARS, do_setup, ) from homeassistant.const import STATE_OFF, STATE_ON from homeassistant.helpers.template import DATE_STR_FORMAT from homeassistant.setup import async_setup_component from homeassistant.util import slugify import homeassistant.util.dt as dt_util from tests.common import async_mock_service GOOGLE_CONFIG = {CONF_CLIENT_ID: "client_id", CONF_CLIENT_SECRET: "client_secret"} TEST_ENTITY = "calendar.we_are_we_are_a_test_calendar" TEST_ENTITY_NAME = "We are, we are, a... Test Calendar" TEST_EVENT = { "summary": "Test All Day Event", "start": {}, "end": {}, "location": "Test Cases", "description": "test event", "kind": "calendar#event", "created": "2016-06-23T16:37:57.000Z", "transparency": "transparent", "updated": "2016-06-24T01:57:21.045Z", "reminders": {"useDefault": True}, "organizer": { "email": "[email protected]", "displayName": "Organizer Name", "self": True, }, "sequence": 0, "creator": { "email": "[email protected]", "displayName": "Organizer Name", "self": True, }, "id": "_c8rinwq863h45qnucyoi43ny8", "etag": '"2933466882090000"', "htmlLink": "https://www.google.com/calendar/event?eid=*******", "iCalUID": "[email protected]", "status": "confirmed", } def get_calendar_info(calendar): """Convert data from Google into DEVICE_SCHEMA.""" calendar_info = DEVICE_SCHEMA( { CONF_CAL_ID: calendar["id"], CONF_ENTITIES: [ { CONF_TRACK: calendar["track"], CONF_NAME: calendar["summary"], CONF_DEVICE_ID: slugify(calendar["summary"]), } ], } ) return calendar_info @pytest.fixture(autouse=True) def mock_google_setup(hass, test_calendar): """Mock the google set up functions.""" hass.loop.run_until_complete(async_setup_component(hass, "group", {"group": {}})) calendar = get_calendar_info(test_calendar) calendars = {calendar[CONF_CAL_ID]: calendar} patch_google_auth = patch( "homeassistant.components.google.do_authentication", side_effect=do_setup ) patch_google_load = patch( "homeassistant.components.google.load_config", return_value=calendars ) patch_google_services = patch("homeassistant.components.google.setup_services") async_mock_service(hass, "google", SERVICE_SCAN_CALENDARS) with patch_google_auth, patch_google_load, patch_google_services: yield @pytest.fixture(autouse=True) def mock_http(hass): """Mock the http component.""" hass.http = Mock() @pytest.fixture(autouse=True) def set_time_zone(): """Set the time zone for the tests.""" # Set our timezone to CST/Regina so we can check calculations # This keeps UTC-6 all year round dt_util.set_default_time_zone(dt_util.get_time_zone("America/Regina")) yield dt_util.set_default_time_zone(dt_util.get_time_zone("UTC")) @pytest.fixture(name="google_service") def mock_google_service(): """Mock google service.""" patch_google_service = patch( "homeassistant.components.google.calendar.GoogleCalendarService" ) with patch_google_service as mock_service: yield mock_service async def test_all_day_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" week_from_today = dt_util.dt.date.today() + dt_util.dt.timedelta(days=7) end_event = week_from_today + dt_util.dt.timedelta(days=1) event = copy.deepcopy(TEST_EVENT) start = week_from_today.isoformat() end = end_event.isoformat() event["start"]["date"] = start event["end"]["date"] = end mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) 
assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_N
AME, "message": event["summary"], "all_day": True, "offset_reached": False, "start_time": week_from_today.strftime(DATE_STR_FORMA
T), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_future_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" one_hour_from_now = dt_util.now() + dt_util.dt.timedelta(minutes=30) end_event = one_hour_from_now + dt_util.dt.timedelta(minutes=60) start = one_hour_from_now.isoformat() end = end_event.isoformat() event = copy.deepcopy(TEST_EVENT) event["start"]["dateTime"] = start event["end"]["dateTime"] = end mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event["summary"], "all_day": False, "offset_reached": False, "start_time": one_hour_from_now.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_in_progress_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" middle_of_event = dt_util.now() - dt_util.dt.timedelta(minutes=30) end_event = middle_of_event + dt_util.dt.timedelta(minutes=60) start = middle_of_event.isoformat() end = end_event.isoformat() event = copy.deepcopy(TEST_EVENT) event["start"]["dateTime"] = start event["end"]["dateTime"] = end mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_ON assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event["summary"], "all_day": False, "offset_reached": False, "start_time": middle_of_event.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_offset_in_progress_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" middle_of_event = dt_util.now() + dt_util.dt.timedelta(minutes=14) end_event = middle_of_event + dt_util.dt.timedelta(minutes=60) start = middle_of_event.isoformat() end = end_event.isoformat() event_summary = "Test Event in Progress" event = copy.deepcopy(TEST_EVENT) event["start"]["dateTime"] = start event["end"]["dateTime"] = end event["summary"] = "{} !!-15".format(event_summary) mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event_summary, "all_day": False, "offset_reached": True, "start_time": middle_of_event.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } @pytest.mark.skip async def test_all_day_offset_in_progress_event(hass, mock_next_event): "
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ******************** **espressopp.Int3D** ******************** .. function:: espressopp.__Int3D(\*args) :param \*args: :type \*args: .. function:: espresso
pp.__Int3D.x(v, [0) :param v: :param [0: :type v: :type [0: :rtype: .. funct
ion:: espressopp.__Int3D.y(v, [1) :param v: :param [1: :type v: :type [1: :rtype: .. function:: espressopp.__Int3D.z(v, [2) :param v: :param [2: :type v: :type [2: :rtype: .. function:: espressopp.toInt3DFromVector(\*args) :param \*args: :type \*args: .. function:: espressopp.toInt3D(\*args) :param \*args: :type \*args: """ from _espressopp import Int3D from espressopp import esutil # This injects additional methods into the Int3D class and pulls it # into this module class __Int3D(Int3D) : __metaclass__ = esutil.ExtendBaseClass __originit = Int3D.__init__ def __init__(self, *args): if len(args) == 0: x = y = z = 0.0 elif len(args) == 1: arg0 = args[0] if isinstance(arg0, Int3D): x = arg0.x y = arg0.y z = arg0.z # test whether the argument is iterable and has 3 elements elif hasattr(arg0, '__iter__') and len(arg0) == 3: x, y, z = arg0 elif isinstance(arg0, int): x = y = z = arg0 else : raise TypeError("Cannot initialize Int3D from %s" % (args)) elif len(args) == 3 : x, y, z = args else : raise TypeError("Cannot initialize Int3D from %s" % (args)) return self.__originit(x, y, z) # create setters and getters @property def x(self): return self[0] @x.setter def x(self, v): self[0] = v @property def y(self) : return self[1] @y.setter def y(self, v) : self[1] = v @property def z(self) : return self[2] @z.setter def z(self, v) : self[2] = v # string conversion def __str__(self) : return str((self[0], self[1], self[2])) def __repr__(self) : return 'Int3D' + str(self) def toInt3DFromVector(*args): """Try to convert the arguments to a Int3D. This function will only convert to a Int3D if x, y and z are specified.""" if len(args) == 1: arg0 = args[0] if isinstance(arg0, Int3D): return arg0 elif hasattr(arg0, '__iter__') and len(arg0) == 3: return Int3D(*args) elif len(args) == 3: return Int3D(*args) raise TypeError("Specify x, y and z.") def toInt3D(*args): """Try to convert the arguments to a Int3D, returns the argument, if it is already a Int3D.""" if len(args) == 1 and isinstance(args[0], Int3D): return args[0] else: return Int3D(*args)
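
# --- Illustrative usage of the constructors above (a minimal sketch) ---
# v = Int3D(1, 2, 3)                # three components
# w = Int3D(5)                      # broadcast a single int: (5, 5, 5)
# u = toInt3D((4, 5, 6))            # from any 3-element iterable
# print(v.x, v.y, v.z)              # -> 1 2 3
# print(toInt3DFromVector(v) is v)  # -> True: already an Int3D, returned as-is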
# -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2018-2020) # # This file is part of GWpy. # # GWpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """Tests for `gwpy.plot.segments` """ import pytest import numpy from matplotlib import rcParams from matplotlib.colors import ColorConverter from matplotlib.collections import PatchCollection from ...segments import (Segment, SegmentList, SegmentListDict, DataQualityFlag, DataQualityDict) from ...time import to_gps from .. import SegmentAxes from ..segments import SegmentRectangle from .test_axes import TestAxes as _TestAxes # extract color cycle COLOR_CONVERTER = ColorConverter() COLOR_CYCLE = rcParams['axes.prop_cycle'].by_key()['color'] COLOR0 = COLOR_CONVERTER.to_rgba(COLOR_CYCLE[0]) class TestSegmentAxes(_TestAxes): AXES_CLASS = SegmentAxes @staticmethod @pytest.fixture() def segments(): return SegmentList([Segment(0, 3), Segment(6, 7)]) @staticmethod @pytest.fixture() def flag(): known = SegmentList([Segment(0, 3), Segment(6, 7)]) active = SegmentList([Segment(1, 2), Segment(3, 4), Segment(5, 7)]) return DataQualityFlag(name='Test segments', known=known, active=active) def test_plot_flag(self, ax, flag): c = ax.plot_flag(flag) assert c.get_label() == flag.texname assert len(ax.collections) == 2 assert ax.collections[0] is c flag.isgood = False c = ax.plot_flag(flag) assert tuple(c.get_facecolors()[0]) == (1., 0., 0., 1.) c = ax.plot_flag(flag, known={'facecolor': 'black'}) c = ax.plot_flag(flag, known='fancy') def test_plot_dqflag(self, ax, flag): with pytest.deprecated_call(): ax.plot_dqflag(flag) assert ax.collections # make sure it plotted something def test_plot_dict(self, ax, flag): dqd = DataQualityDict() dqd['a'] = flag dqd['b'] = flag colls = ax.plot_dict(dqd) assert len(colls) == len(dqd) assert all(isinstance(c, PatchCollection) for c in colls) assert colls[0].get_label() == 'a' assert colls[1].get_label() == 'b' colls = ax.plot_dict(dqd, label='name') assert colls[0].get_label() == 'Test segments' colls = ax.plot_dict(dqd, label='anything') assert colls[0].get_label() == 'anything' def test_plot_dqdict(self, ax, flag): with pytest.deprecated_call(): ax.plot_dqdict(DataQualityDict(a=flag)) def test_plot_segmentlist(self, ax, segments): c = ax.plot_segmentlist(segments) assert isinstance(c, PatchCollection) assert numpy.isclose(ax.dataLim.x0, 0.) assert numpy.isclose(ax.dataLim.x1, 7.) assert len(
c.get_paths()) == len(segments) assert ax.get_epoch() == segments[0][0] # test y p = ax.plot_segmentlist(segments).get_paths()[0].get_extents() assert p.y0 + p.height/2. == 1.
        p = ax.plot_segmentlist(segments, y=8).get_paths()[0].get_extents()
        assert p.y0 + p.height/2. == 8.

        # test kwargs
        c = ax.plot_segmentlist(segments, label='My segments', rasterized=True)
        assert c.get_label() == 'My segments'
        assert c.get_rasterized() is True

        # test collection=False
        c = ax.plot_segmentlist(segments, collection=False, label='test')
        assert isinstance(c, list)
        assert not isinstance(c, PatchCollection)
        assert c[0].get_label() == 'test'
        assert c[1].get_label() == ''
        assert len(ax.patches) == len(segments)

        # test empty
        c = ax.plot_segmentlist(type(segments)())

    def test_plot_segmentlistdict(self, ax, segments):
        sld = SegmentListDict()
        sld['TEST'] = segments
        ax.plot(sld)

    def test_plot(self, ax, segments, flag):
        dqd = DataQualityDict(a=flag)
        ax.plot(segments)
        ax.plot(flag)
        ax.plot(dqd)
        ax.plot(flag, segments, dqd)

    def test_insetlabels(self, ax, segments):
        ax.plot(segments)
        ax.set_insetlabels(True)

    def test_fmt_data(self, ax):
        # just check that the LIGOTimeGPS repr is in place
        value = 1234567890.123
        assert ax.format_xdata(value) == str(to_gps(value))

    # -- disable tests from upstream

    def test_imshow(self):
        return NotImplemented


def test_segmentrectangle():
    patch = SegmentRectangle((1.1, 2.4), 10)
    assert patch.get_xy() == (1.1, 9.6)
    assert numpy.isclose(patch.get_height(), 0.8)
    assert numpy.isclose(patch.get_width(), 1.3)
    assert patch.get_facecolor() == COLOR0

    # check kwarg passing
    patch = SegmentRectangle((1.1, 2.4), 10, facecolor='red')
    assert patch.get_facecolor() == COLOR_CONVERTER.to_rgba('red')

    # check valign
    patch = SegmentRectangle((1.1, 2.4), 10, valign='top')
    assert patch.get_xy() == (1.1, 9.2)
    patch = SegmentRectangle((1.1, 2.4), 10, valign='bottom')
    assert patch.get_xy() == (1.1, 10.0)
    with pytest.raises(ValueError):
        patch = SegmentRectangle((0, 1), 0, valign='blah')
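
# --- Worked geometry check (illustrative; derived from the assertions above) ---
# SegmentRectangle((x0, x1), y) spans the segment [x0, x1] on the x-axis with a
# fixed height of 0.8 centred on y, so for (1.1, 2.4) at y=10:
# width  = 2.4 - 1.1           # -> 1.3
# height = 0.8
# xy     = (1.1, 10 - 0.8/2.)  # -> (1.1, 9.6)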
# Copyright (c) 2008 Chris Moyer http://coredumped.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHE
R IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

class Qualifications(object):

    def __init__(self, requirements=None):
        if requirements is None:
            requirements = []
        self.requirements = requirements

    def add(self, req):
self.requirements.append(req) def get_as_params(self): params = {} assert(len(self.requirements) <= 10) for n, req in enumerate(self.requirements): reqparams = req.get_as_params() for rp in reqparams: params['QualificationRequirement.%s.%s' % ((n+1),rp) ] = reqparams[rp] return params class Requirement(object): """ Representation of a single requirement """ def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False): self.qualification_type_id = qualification_type_id self.comparator = comparator self.integer_value = integer_value self.required_to_preview = required_to_preview def get_as_params(self): params = { "QualificationTypeId": self.qualification_type_id, "Comparator": self.comparator, } if self.comparator != 'Exists' and self.integer_value is not None: params['IntegerValue'] = self.integer_value if self.required_to_preview: params['RequiredToPreview'] = "true" return params class PercentAssignmentsSubmittedRequirement(Requirement): """ The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100. """ def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsAbandonedRequirement(Requirement): """ The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100. """ def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsReturnedRequirement(Requirement): """ The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100. """ def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsApprovedRequirement(Requirement): """ The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. """ def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class PercentAssignmentsRejectedRequirement(Requirement): """ The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. """ def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class NumberHitsApprovedRequirement(Requirement): """ Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0. 
""" def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) class LocaleRequirement(Requirement): """ A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account. """ def __init__(self, comparator, locale, required_to_preview=False): Requirement.__init__(self, qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) self.locale = locale def get_as_params(self): params = { "QualificationTypeId": self.qualification_type_id, "Comparator": self.comparator, 'LocaleValue.Country': self.locale, } if self.required_to_preview: params['RequiredToPreview'] = "true" return params class AdultRequirement(Requirement): """ Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default). """ def __init__(self, comparator, integer_value, required_to_preview=False): Requirement.__init__(self, qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
# Determine whether an integer is a palindrome. Do this without extra space. class Solution: # @return a boolean def isPalindrome1(self, x): if x < 0 or x % 10 == 0 and x:
return False xhalf = 0 while x > xhalf: xhalf = xhalf * 10 + x % 10 x /= 10 return (x == xhalf or x == xhalf/10 ) def isPalindrome(self, x): if x < 0: return False
size, xreverse = x, 0 while size: xreverse = xreverse * 10 + size % 10 size = (size - (size % 10)) / 10 return True if xreverse==x else False if __name__ == '__main__': s = Solution() print s.isPalindrome1(0)
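
# --- Worked example of the half-reversal in isPalindrome1 (illustrative) ---
# x = 1221: xhalf=1, x=122 -> xhalf=12, x=12 -> loop stops; x == xhalf -> True
# x = 121 (odd length): xhalf=1, x=12 -> xhalf=12, x=1 -> loop stops;
# the middle digit is discarded by the x == xhalf/10 comparison (1 == 12/10).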
'''
Created on Aug 27, 2013

@author: Devon

Define gui events
'''

from pyHopeEngine import BaseEvent


class Event_ButtonPressed(BaseEvent):
    '''Sent when a button is pressed'''
    eventType = "ButtonPressed"

    def __init__(self, value):
        '''Contains a value identifying the button'''
        self.value = value


class Event_ScreenResize(BaseEvent):
    '''Sent when a screen resize is requested'''
    eventType = "ScreenResize"

    def __init__(self, width, height):
        self.width = width
        self.height = height
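
# --- Illustrative usage (a minimal sketch; event consumers are assumed) ---
# pressed = Event_ButtonPressed('quit_button')
# resize = Event_ScreenResize(1280, 720)
# print('%s %dx%d' % (resize.eventType, resize.width, resize.height))  # ScreenResize 1280x720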
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mysql.connector
import time
import datetime

conn = mysql.connector.connect(host="localhost", user="spike", password="valentine", database="drupal")
cann = mysql.connector.connect(host="localhost", user="spike", password="valentine", database="content_delivery_weather")
cursor = conn.cursor()
cursar = cann.cursor()

cursor.execute("""SELECT uid, mail FROM users""")
rows = cursor.fetchall()
for row in rows:
    if row[0] != 0:
        print('{0} : {1} '.format(row[0], row[1]))
        #print('UPDATE new_v4_users_probes_edit SET email = {0} WHERE uid = {1}'.format(row[1], row[0]))
        cursar.execute("""UPDATE new_v4_users_probes_edit SET email = %s WHERE userid = %s""",
                       (row[1], row[0]))

cursar.execute("""SELECT probename, probeid FROM new_v4_sonde""")
rows = cursar.fetchall()
for row in rows:
    cursar.execute("""SHOW TABLES LIKE %s""", ("%" + row[0] + "%",))
    rowsbis = cursar.fetchall()
    for rowbis in rowsbis:
        result = rowbis[0].split("_")
        month = 1 + int(result[4])
        s = "01/" + str(month) + "/" + result[3]
        timestamp = time.mktime(datetime.datetime.strptime(s, "%d/%m/%Y").timetuple())
        print('{0} : {1} year: {2} month: {3} timestamp: {4}'.format(row[0], rowbis[0], result[3], result[4], round(timestamp, 0)))
        cursar.execute("""SELECT firsttime FROM new_v4_sonde WHERE probeid = %s""", (row[1],))
        rowsbisbis = cursar.fetchall()
        for rowbisbis in rowsbisbis:
            if rowbisbis[0] is None:
                cursar.execute("""UPDATE new_v4_sonde SET firsttime = %s WHERE probeid = %s""", (timestamp, row[1]))
            print('firsttime: {0}'.format(rowbisbis[0],))

# mysql.connector leaves autocommit off by default, so the UPDATEs above
# would be discarded without an explicit commit before closing.
cann.commit()
conn.close()
cann.close()
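
# --- Worked example of the timestamp derivation above (table name hypothetical) ---
# For a table named like 'cdw_data_probe7_2016_03', split("_") yields
# result[3] = '2016' (year) and result[4] = '03' (month), so month = 4 and
# s = '01/4/2016': the first day of the month *after* the one in the name.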
# I made some modifications to termcolor so you can pass HEX colors to # the colored function. It then chooses the nearest xterm 256 color to # that HEX color. This requires some color functions that I have added # in my python path. # # 2015/02/16 # # # coding: utf-8 # Copyright (c) 2008-2011 Volvox Development Team # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # Author: Konstantin Lepa <[email protected]> """ANSII Color formatting for output in terminal.""" from __future__ import print_function import os import re from hexrgb_conversion import rgb from x256 import from_rgb __ALL__ = ["colored", "cprint"] VERSION = (1, 1, 0) ATTRIBUTES = dict( list( zip( ["bold", "dark", "", "underline", "blink", "", "reverse", "concealed"], list(range(1, 9)), ) ) ) del ATTRIBUTES[""] HIGHLIGHTS = dict( list( zip( [ "on_grey", "on_red", "on_green", "on_yellow", "on_blue", "on_magenta", "on_cyan", "on_white", ], list(range(40, 48)), ) ) ) COLORS = dict( list( zip( ["grey", "red", "green", "yellow", "blue", "magenta", "cyan", "white"], list(range(30, 38)), ) ) ) RESET = "\033[0m" def colored(text, color=None, on_color=None, attrs=None): """Colorize text. I made some modification so you can pass HEX colors too Available text colors: red, green, yellow, blue, magenta, cyan, white. Available text highlights: on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white. Available attributes: bold, dark, underline, blink, reverse, concealed. Example: colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink']) colored('Hello, World!', 'green') """ if os.getenv("ANSI_COLORS_DISABLED") is None: fmt_str = "\033[%dm%s" if color is not None: if "#" in color: color = re.sub("[#]", ""
, color) RGB = rgb(color) x256_color_index = from_rgb(RGB[0], RGB[1], RGB[2]) text = "\033[38;5;%dm
%s" % (x256_color_index, text) else: text = fmt_str % (COLORS[color], text) if on_color is not None: if "#" in on_color: on_color = re.sub("[#]", "", on_color) RGB = rgb(on_color) x256_color_index = from_rgb(RGB[0], RGB[1], RGB[2]) text = "\033[48;5;%dm%s" % (x256_color_index, text) else: text = fmt_str % (HIGHLIGHTS[on_color], text) if attrs is not None: for attr in attrs: text = fmt_str % (ATTRIBUTES[attr], text) text += RESET return text def cprint(text, color=None, on_color=None, attrs=None, **kwargs): """Print colorize text. It accepts arguments of print function. """ print((colored(text, color, on_color, attrs)), **kwargs) if __name__ == "__main__": print("Current terminal type: %s" % os.getenv("TERM")) print("Test basic colors:") cprint("Grey color", "grey") cprint("Red color", "red") cprint("Green color", "green") cprint("Yellow color", "yellow") cprint("Blue color", "blue") cprint("Magenta color", "magenta") cprint("Cyan color", "cyan") cprint("White color", "white") print(("-" * 78)) print("Test highlights:") cprint("On grey color", on_color="on_grey") cprint("On red color", on_color="on_red") cprint("On green color", on_color="on_green") cprint("On yellow color", on_color="on_yellow") cprint("On blue color", on_color="on_blue") cprint("On magenta color", on_color="on_magenta") cprint("On cyan color", on_color="on_cyan") cprint("On white color", color="grey", on_color="on_white") print("-" * 78) print("Test attributes:") cprint("Bold grey color", "grey", attrs=["bold"]) cprint("Dark red color", "red", attrs=["dark"]) cprint("Underline green color", "green", attrs=["underline"]) cprint("Blink yellow color", "yellow", attrs=["blink"]) cprint("Reversed blue color", "blue", attrs=["reverse"]) cprint("Concealed Magenta color", "magenta", attrs=["concealed"]) cprint( "Bold underline reverse cyan color", "cyan", attrs=["bold", "underline", "reverse"], ) cprint( "Dark blink concealed white color", "white", attrs=["dark", "blink", "concealed"], ) print(("-" * 78)) print("Test mixing:") cprint("Underline red on grey color", "red", "on_grey", ["underline"]) cprint("Reversed green on red color", "green", "on_red", ["reverse"]) print("Using HEX colors:") cprint("Use HEX color EE2E2F", "#EE2E2F")
#! /usr/bin/python ''' Suppose a sorted array is rotated at some pivot unknown to you beforehand. (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2). Find the minimum element. ''' class Solution: # @param num, a list of integer # @return an integer # You may assume no duplicate exists in the array. def findMinNoDuplicate(self, num): INT_MIN_VALUE = -(2**32) size = len(num) if size == 0: return INT_MIN_VALUE elif size == 1: return num[0] low_index = 0 high_index = size - 1 while (low_index < high_index - 1): mid_index = low_index + (high_index - low_index) / 2 if (num[mid_index] > num[high_index]): low_index = mid_index else: high_index = mid_index return min(num[low_index], num[high_index]) # @param num, a list of integer # @return an integer # You may assume duplicate exists in the
array. def findMinDuplicate(self, num): INT_MIN_VALUE = -(2**32) size = len(num) if size == 0: return INT_MIN_VALUE elif size == 1: return num[0] low_index = 0 high_index = size - 1 while (low_index < high_index - 1): mid_index = low_index + (high_index - low_index) / 2 if (num
[mid_index] > num[high_index]): low_index = mid_index elif (num[mid_index] < num[high_index]): high_index = mid_index else: high_index -= 1 return min(num[low_index], num[high_index]) if __name__ == '__main__': solution = Solution() print solution.findMinDuplicate([3,3,1,2,2])
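
# --- Worked trace of findMinDuplicate on [3, 3, 1, 2, 2] (illustrative) ---
# low=0, high=4: mid=2, num[2]=1 < num[4]=2  -> high=2
# low=0, high=2: mid=1, num[1]=3 > num[2]=1  -> low=1
# low=1, high=2: loop ends (low == high-1); min(num[1], num[2]) = 1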
import pytest import re import capybara class TestHasSelector: @pytest.fixture(autouse=True) def setup_session(self, session): session.visit("/with_html") def test_is_true_if_the_given_selector_is_on_the_page(self, session): assert session.has_selector("xpath", "//p") assert session.has_selector("css", "p a#foo") assert session.has_selector("//p[contains(.,'est')]") def test_is_false_if_the_given_selector_is_not_on_the_page(self, session): assert not session.has_selector("xpath", "//abbr") assert not session.has_selector("css", "p a#doesnotexist") assert not session.has_selector("//p[contains(.,'thisstringisnotonpage')]") def test_uses_default_selector(self, session): capybara.default_selector = "css" assert not session.has_selector("p a#doesnotexist") assert session.has_selector("p a#foo") def test_respects_scopes(self, session): with session.scope("//p[@id='first']"): assert session.has_selector(".//a[@id='foo']") assert not session.has_selector(".//a[@id='red']") def test_is_true_if_the_content_is_on_the_page_the_given_number_of_times(self, session): assert session.has_selector("//p", count=3) assert session.has_selector("//p//a[@id='foo']", count=1) assert session.has_selector("//p[contains(.,'est')]", count=1) def test_is_false_if_the_content_is_not_on_the_page_the_given_number_of_times(self, session): assert not session.has_selector("//p", count=6) assert not session.has_selector("//p//a[@id='foo']", count=2) assert not session.has_selector("//p[contains(.,'est')]", count=5) def test_is_false_if_the_content_is_not_on_the_page_at_all(self, session): assert not session.has_selector("//abbr", count=2) assert not session.has_selector("//p//a[@id='doesnotexist']", count=1) def test_discards_all_matches_where_the_given_string_is_not_contained(self, session): assert session.has_selector("//p//a", text="Redirect", count=1) assert not session.has_selector("//p", text="Doesnotexist") def test_respects_visibility_setting(self, session): assert session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=False) assert not session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=True) capybara.ignore_hidden_elements = False assert session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=False) capybara.visible_text_only = True assert not session.has_selector("id", "hidden-text", text="Some of this text is hidden!", visible=True) def test_discards_all_matches_where_the_given_regex_is_not_matched(self, session): assert session.has_selector("//p//a", text=re.compile("re[dab]i", re.IGNORECASE), count=1) assert not session.has_selector("//p//a", text=re.compile("Red$")) def test_only_matches_elements_that_match_exact_text_exactly(self, session): assert session.has_selector("id", "h2one", exact_text="Header Class Test One") assert not session.has_selector("id", "h2one", exact_text="Header Class Test") def
test_only_matches_elements_that_match_exactly_when_exact_text_true(self, session): assert session.has_selector("id", "h2one", text="Header Class Test One", exact_text=True) assert not session.has_selector("id", "h2one", text="Header Class Test", ex
act_text=True) def test_matches_substrings_when_exact_text_false(self, session): assert session.has_selector("id", "h2one", text="Header Class Test One", exact_text=False) assert session.has_selector("id", "h2one", text="Header Class Test", exact_text=False) class TestHasNoSelector: @pytest.fixture(autouse=True) def setup_session(self, session): session.visit("/with_html") def test_is_false_if_the_given_selector_is_on_the_page(self, session): assert not session.has_no_selector("xpath", "//p") assert not session.has_no_selector("css", "p a#foo") assert not session.has_no_selector("//p[contains(.,'est')]") def test_is_true_if_the_given_selector_is_not_on_the_page(self, session): assert session.has_no_selector("xpath", "//abbr") assert session.has_no_selector("css", "p a#doesnotexist") assert session.has_no_selector("//p[contains(.,'thisstringisnotonpage')]") def test_uses_default_selector(self, session): capybara.default_selector = "css" assert session.has_no_selector("p a#doesnotexist") assert not session.has_no_selector("p a#foo") def test_respects_scopes(self, session): with session.scope("//p[@id='first']"): assert not session.has_no_selector(".//a[@id='foo']") assert session.has_no_selector("../a[@id='red']") def test_is_false_if_the_content_is_on_the_page_the_given_number_of_times(self, session): assert not session.has_no_selector("//p", count=3) assert not session.has_no_selector("//p//a[@id='foo']", count=1) assert not session.has_no_selector("//p[contains(.,'est')]", count=1) def test_is_true_if_the_content_is_on_the_page_the_wrong_number_of_times(self, session): assert session.has_no_selector("//p", count=6) assert session.has_no_selector("//p//a[@id='foo']", count=2) assert session.has_no_selector("//p[contains(.,'est')]", count=5) def test_is_true_if_the_content_is_not_on_the_page_at_all(self, session): assert session.has_no_selector("//abbr", count=2) assert session.has_no_selector("//p//a[@id='doesnotexist']", count=1) def test_discards_all_matches_where_the_given_string_is_contained(self, session): assert not session.has_no_selector("//p//a", text="Redirect", count=1) assert session.has_no_selector("//p", text="Doesnotexist") def test_discards_all_matches_where_the_given_regex_is_matched(self, session): assert not session.has_no_selector("//p//a", text=re.compile(r"re[dab]i", re.IGNORECASE), count=1) assert session.has_no_selector("//p//a", text=re.compile(r"Red$")) def test_only_matches_elements_that_do_not_match_exact_text_exactly(self, session): assert not session.has_no_selector("id", "h2one", exact_text="Header Class Test One") assert session.has_no_selector("id", "h2one", exact_text="Header Class Test") def test_only_matches_elements_that_do_not_match_exactly_when_exact_text_true(self, session): assert not session.has_no_selector("id", "h2one", text="Header Class Test One", exact_text=True) assert session.has_no_selector("id", "h2one", text="Header Class Test", exact_text=True) def test_does_not_match_substrings_when_exact_text_false(self, session): assert not session.has_no_selector("id", "h2one", text="Header Class Test One", exact_text=False) assert not session.has_no_selector("id", "h2one", text="Header Class Test", exact_text=False)
# # Copyright (C) 2017 Smithsonian Astrophysical Observatory # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # wit
h this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ This is based on sherpa/sim/tests_sim_unit.py. """ from sherpa.astro import sim # This is part of #3
97 # def test_list_samplers(): """Ensure list_samplers returns a list.""" mcmc = sim.MCMC() samplers = mcmc.list_samplers() assert isinstance(samplers, list) assert len(samplers) > 0 def test_list_samplers_contents(): """Are the expected values included""" # Test that the expected values exist in this list, # but do not enforce these are the only values. This is # a slightly-different return list to the non-astro version. # samplers = sim.MCMC().list_samplers() for expected in ['mh', 'metropolismh', 'pragbayes', 'fullbayes']: assert expected in samplers
from impl import FixedClientDeauthAttack,\ SniffedClientDeauthAttack,\ GlobalDisassociationAttack class WiFiDeauthAttackBuilder(object): '''This object finds the appropriate attack for the options supplied by the user.''' @classmethod def build_from(cls, options): subclasses = WiFiDeauthAttackWrapper.__subclasses__() candidates = filter(lambda subclass: subclass.handles(options), subclasses) return candidates[0](options) class WiFiDeauthAttackWrapper(object): @classmethod def handles(cls, options): raise NotImplementedError def __init__(self, options): self.options = options def _get_attack_implementor(self): raise NotImplementedError def run(self): attack = self._get_attack_implementor() executi
ons = self.options.executions persistence_times = self.options.persistence_times return attack.run(exe
cutions, persistence_times) class FixedClientDeauthAttackWrapper(WiFiDeauthAttackWrapper): @classmethod def handles(cls, options): return len(options.client) > 0 def _get_attack_implementor(self): interface = self.options.interface bssid = self.options.bssid client = self.options.client return FixedClientDeauthAttack(interface, bssid, [client]) class GlobalDisassociationAttackWrapper(WiFiDeauthAttackWrapper): @classmethod def handles(cls, options): return len(options.client) == 0 and not options.should_sniff def _get_attack_implementor(self): interface = self.options.interface bssid = self.options.bssid return GlobalDisassociationAttack(interface, bssid) class SniffedClientDeauthAttackWrapper(WiFiDeauthAttackWrapper): @classmethod def handles(cls, options): return len(options.client) == 0 and options.should_sniff def _get_attack_implementor(self): interface = self.options.interface bssid = self.options.bssid timeout = self.options.timeout return SniffedClientDeauthAttack(interface, bssid, timeout)
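
# --- Illustrative usage (a minimal sketch; the options object below is hypothetical) ---
# Any object exposing the attributes read above will do, e.g. an argparse Namespace:
#
# from argparse import Namespace
# options = Namespace(interface='wlan0', bssid='00:11:22:33:44:55', client='',
#                     should_sniff=False, executions=1, persistence_times=1)
# attack = WiFiDeauthAttackBuilder.build_from(options)  # -> GlobalDisassociationAttack wrapper
# attack.run()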
[0], input_ids.shape[-1] - 1) ) outputs_cache = model( input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, ) position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, ) outputs = model(input_ids, attention_mask=attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class FlaxGPT2ModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase): all_model_classes = (FlaxGPT2Model, FlaxGPT2LMHeadModel) if is_flax_available() else () all_generative_model_classes = (FlaxGPT2LMHeadModel,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxGPT2ModelTester(self) def test_use_cache_forward(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask) def test_use_cache_forward_with_attn_mask(self): for model_class_name in self.all_model_classes: config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( model_class_name, config, input_ids, attention_mask ) @slow def test_batch_generation(self): tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="</s>", padding_side="left") inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="jax", padding=True, truncation=True) model = FlaxGPT2LMHeadModel.from_pretrained("gpt2") model.do_sample = False model.config.pad_token_id = model.config.eos_token_id jit_generate = jax.jit(model.generate) output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True) expected_string = [ "Hello this is a long string of words. 
I'm going to try to explain what I mean.",
            "Hey, I'm not sure if I'm going to be able to do",
        ]

        self.assertListEqual(output_string, expected_string)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    # overwrite from common since `attention_mask` in combination
    # with `causal_mask` behaves slightly differently
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
from xyz.location import build_location def test_build_location_simple(): # test Location = build_location() location = Location("C
anada", "Charlottetown") assert location.country ==
"Canada" assert location.city == "Charlottetown"
# -*- coding: utf-8 -*- # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """SHA-224 cryptographic hash algorithm. SHA-224 belongs to the SHA-2_ family of cryptographic hashes. It produces the 224 bit digest of a message. >>> from Cryptodome.Hash import SHA224 >>> >>> h = SHA224.new() >>> h.update(b'Hello') >>> print h.hexdigest() *SHA* stands for Secure Hash Algorithm. .. _SHA-2: http://csrc.nist.gov/publications/fips/fips180-2/fips180-4.pdf """ from Cryptodome.Util.py3compat import * from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer, SmartPointer, create_string_buffer, get_raw_buffer, c_size_t, expect_byte_string) _raw_sha224_lib = load_pycryptodome_raw_lib("Cryptodome.Hash._SHA224", """ int SHA224_init(void **shaState); int SHA224_destroy(void *shaState); int SHA224_update(void *hs, const uint8_t *buf, size_t len); int SHA224_digest(const void *shaState, uint8_t digest[16]); int SHA224_copy(const void *src, void *dst); """) class SHA224Hash(object): """Class that implements a SHA-224 hash """ #: The size of the resulting hash in bytes. digest_size = 28 #: The internal block size of the hash algorithm in bytes. block_size = 64 #: ASN.1 Object ID oid = '2.16.840.1.101.3.4.2.4' def __init__(self, data=None): state = VoidPointer() result = _raw_sha224_lib.SHA224_init(state.address_of()) if result: raise ValueError("Error %d while instantiating SHA224" % result) self._state = SmartPointer(state.get(), _raw_sha224_lib.SHA224_destroy) if data: self.update(data) def update(self, data): """Continue hashing of a message by consuming the next chunk of data. Repeated calls are equivalent to a single call with the concatenation of all the arguments. In other words: >>> m.update(a); m.update(b) is equivalent to: >>> m.update(a+b) :Parameters: data : byte string The next chunk of the message being hashed. """ expect_byte_string(data) result = _raw_sha224_lib.SHA224_update(self._state.get(), data, c_size_t(len(data))) if result: raise ValueError("Error %d while instantiating SHA224" % result) def digest(self): """Return the **binary** (non-printable) digest of the message that has been hashed so far. This method does not change the state of the hash object. You
can continue updating the object after calling this function. :Return: A byte string of `digest_size` bytes. It may contain non-ASCII charac
ters, including null bytes. """ bfr = create_string_buffer(self.digest_size) result = _raw_sha224_lib.SHA224_digest(self._state.get(), bfr) if result: raise ValueError("Error %d while instantiating SHA224" % result) return get_raw_buffer(bfr) def hexdigest(self): """Return the **printable** digest of the message that has been hashed so far. This method does not change the state of the hash object. :Return: A string of 2* `digest_size` characters. It contains only hexadecimal ASCII digits. """ return "".join(["%02x" % bord(x) for x in self.digest()]) def copy(self): """Return a copy ("clone") of the hash object. The copy will have the same internal state as the original hash object. This can be used to efficiently compute the digests of strings that share a common initial substring. :Return: A hash object of the same type """ clone = SHA224Hash() result = _raw_sha224_lib.SHA224_copy(self._state.get(), clone._state.get()) if result: raise ValueError("Error %d while copying SHA224" % result) return clone def new(self, data=None): return SHA224Hash(data) def new(data=None): """Return a fresh instance of the hash object. :Parameters: data : byte string The very first chunk of the message to hash. It is equivalent to an early call to `SHA224Hash.update()`. Optional. :Return: A `SHA224Hash` object """ return SHA224Hash().new(data) #: The size of the resulting hash in bytes. digest_size = SHA224Hash.digest_size #: The internal block size of the hash algorithm in bytes. block_size = SHA224Hash.block_size
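
# --- Illustrative use of copy() for messages sharing a prefix (a minimal sketch) ---
# base = new(b'common prefix ')
# h1 = base.copy(); h1.update(b'one')
# h2 = base.copy(); h2.update(b'two')
# print(h1.hexdigest())  # digest of b'common prefix one'
# print(h2.hexdigest())  # digest of b'common prefix two'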
""" Braces for type definition(class / struct / union / enum) should be located in the seperate line. == Violation == class K() { <== ERROR } struct K { <== ERROR } == Good == struct A() { <== CORRECT } class K() { <== CORRECT public : void Hello() { <== Don't care. It's a function definition. } } """ from nsiqunittest.nsiqcppstyle_unittestbase import * from nsiqcppstyle_rulehelper import * from nsiqcppstyle_reporter import * from nsiqcppstyle_rulemanager import * def RunRule(lexer, currentType, fullName, decl, contextStack, typeContext): if not decl and currentType != "NAMESPACE" and typeContext is not None: t = lexer.GetNextTokenInType("LBRACE", False, True) if t is not None: t2 = typeContext.endToken if t2 is not None and t.lineno != t2.lineno: prevToken = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess() # print contextStack.Peek() if prevToken is not None and prevToken.lineno == t.lineno: nsiqcppstyle_reporter.Error( t, __name__, "The brace for type definition should be located in start of line") if t2.lineno != t.lineno and GetRealColumn( t2) != GetRealColumn(t): nsiqcppstyle_reporter.Error( t2, __name__, "The brace for type definition should be located in same column") ruleManager.AddTypeNameRule(RunRule) ########################################################################## # Unit Tes
t ########################################################################## class testRule(nct): def setUpRule(self): ruleManager.AddTypeNameRule(RunRule) def test1(self): self.Analyze("thisfile.c", """ public class A { } """) self.ExpectError(__name__) def test2(self): self.Analyze("thisfile.c", """ class C : public AA { } """) self.ExpectError(__n
ame__) def test3(self): self.Analyze("thisfile.c", """ class K { void function() const { } class T { } } """) self.ExpectSuccess(__name__) def test4(self): self.Analyze("thisfile.c", """ class K { void function() const { } class T { } } """) self.ExpectError(__name__) def test5(self): self.Analyze("thisfile.c", """ class C : public AA { class T { } } """) self.ExpectError(__name__) def test6(self): self.Analyze("thisfile.c", """ class C : public AA { class T { } } """) self.ExpectError(__name__) def test7(self): self.Analyze("thisfile.c", """ class C : public AA { class T { } } """) self.ExpectSuccess(__name__) def test8(self): self.Analyze("thisfile.c", """ namespace C { } """) self.ExpectSuccess(__name__) def test9(self): self.Analyze("thisfile.c", """ if (hello) { // {kr} m_btn5 {/kr} } """) self.ExpectSuccess(__name__)
svReader def main(): file_name = "../data/disease/uniprot/humdisease.txt" mim_to_mesh_values = get_mim_to_mesh(file_name) print len(mim_to_mesh) print mim_to_mesh["600807"] return from time import clock parser = UniprotXMLParser("../data/Q12888.xml") #parser = UniprotXMLParser("../../data/phosphorylation/uniprot/uniprot-phosphorylation-large-scale-analysis.xml") #ids = parser.parse_ids() #print map(len, ids) #print ids[-1] t1 = clock() elements = parser.parse() t2 = clock() print len(elements), elements[-1] print t2-t1 return def get_uniprot_to_geneid(file_name, uniprot_ids=None, only_min=True, key_function=int): """ To parse HUMAN_9606_idmapping.dat file (trimmed to two columns) from Uniprot only_min: Chooses the "min" defined by key_function used in min() key_function: int (geneids) | len (gene symbols) Creating the file wget ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz zgrep Gene_Name HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > uniprot_to_symbol.txt zgrep GeneID HUMAN_9606_idmapping.dat.gz | cut -f 1,3 > idmapping.tab OR zcat HUMAN_9606_idmapping_selected.dat.gz | cut -f 1,3 > idmapping.tab """ uniprot_to_geneids = {} #geneid_to_uniprots = {} f = open(file_name) f.readline() for line in f: uniprot, geneid = line.split("\t") geneid = geneid.strip() uniprot = uniprot.strip() if geneid == "" or uniprot == "": continue if uniprot_ids is not None and uniprot not in uniprot_ids: continue #if only_min: # geneid = min(geneid.split("; "), key=key_function) #uniprot_to_geneids[uniprot] = geneid uniprot_to_geneids.setdefault(uniprot, set()).add(geneid) f.close() if only_min: uniprot_to_geneid = {} for uniprot, geneids in uniprot_to_geneids.iteritems(): uniprot_to_geneid[uniprot] = min(geneids, key=key_function) uniprot_to_geneids = uniprot_to_geneid return uniprot_to_geneids def get_uniprot_to_geneid_from_idmapping_file(file_name, uniprot_ids=None): """ To parse idmapping.tab from Uniprot Useful for id mapping of non-human species """ parser = TsvReader.TsvReader(file_name, delim="\t", inner_delim=";") column_to_index, id_to_values = parser.read(fields_to_include=["UniProtKB-AC", "GeneID (EntrezGene)"], keys_to_include=uniprot_ids, merge_inner_values=True) uniprot_to_geneid = {} for uniprot, values in id_to_values.iteritems(): for val in values: geneid = val[column_to_index["geneid (entrezgene)"]] #if uniprot in uniprot_to_geneid: # print "multiple gene id", uniprot #uniprot_to_geneid.setdefault(uniprot, set()).add(geneid) uniprot_to_geneid[uniprot] = geneid return uniprot_to_geneid def get_mim_to_mesh(file_name): """ To parse humdisease.txt from Uniprot """ mim_to_mesh_values = {} f = open(file_name) line = f.readline() while not line.startswith("ID"): line = f.readline() words = line.strip().split() disease = " ".join(words[1:]).rstrip(".") for line in f: words = line.strip().split() if words[0] == "ID": disease = " ".join(words[1:]).rstrip(".") if words[0] == "DR": id_type = words[1].lower().rstrip(";") if id_type == "mesh": mesh = words[2].rstrip(".") elif id_type == "mim": mim = words[2].rstrip(";") if line.startswith("//"): #if mim in mim_to_mesh_values and mim_to_mesh_values[mim][1] == mesh: #continue #if mim in mim_to_mesh_values: print mim, mim_to_mesh_values[mim], disease, mesh mim_to_mesh_values.setdefault(mim, []).append((disease, mesh)) f.close() return mim_to_mesh_values class UniprotXMLParser(object): NS="{http://uniprot.org/uniprot}" psiteDesc_to_psiteChar = { "Phosphoserine": "S", "Phosphothreonine": 
"T", "Phosphotyrosine": "Y", "Phosphohistidine": "H" } def __init__(self, filename): self.file_name = filename #self.etree = ElementTree() return def parse_ids_high_mem(self): self.etree = ElementTree() tree = self.etree.parse(self.file_name) #ids = tree.findall(self.NS+"accession") ids = [] sub_ids = None for e in tree.getiterator(): if e.tag == self.NS+"entry": if sub_ids is not None: ids.append(sub_ids) sub_ids = [] if e.tag == self.NS+"accession": sub_ids.append(e.text) ids.append(sub_ids) return ids def parse_ids(self): ids = [] sub_ids = [] # get an iterable context = iterparse(self.file_name, ["start", "end"]) # turn it into an iterator context = iter(context) # get the root element event, root = context.next() for (event, elem) in context: if event == "end": if elem.tag == self.NS+"acc
ession": sub_ids.append
(elem.text) if elem.tag == self.NS+"entry": ids.append(sub_ids) sub_ids = [] elem.clear() root.clear() return ids def parse(self): ignored_modification_types = set() context = iterparse(self.file_name, ["start", "end"]) context = iter(context) event, root = context.next() elements = [] current_element = None current_position = None for (event, elem) in context: if event == "start": if elem.tag == self.NS+"entry": current_element = UniprotXMLElement() elif event == "end": if elem.tag == self.NS+"accession": current_element.add_id(elem.text) elif elem.tag == self.NS+"organism": db_elm = elem.find(self.NS+"dbReference") #only looks at sublevel - alternative: keep tag stack if db_elm.get("type") == "NCBI Taxonomy": current_element.set_tax(db_elm.get("id")) elif elem.tag == self.NS+"feature" and elem.get("type") == "modified residue": #print elem.getchildren() #pos_elm = elem.find(self.NS+"position") #if elem.get("status") == "probable": # continue for sub_elm in elem.getiterator(): if sub_elm.tag == self.NS+"position": pos_elm = sub_elm pos = pos_elm.get("position") desc = elem.get("description") vals = desc.split(";") type = vals[0] kinase = vals[1][vals[1].find("by")+2:].strip() if (len(vals) > 1) else None if self.psiteDesc_to_psiteChar.has_key(type): type = self.psiteDesc_to_psiteChar[type] current_element.add_psite(pos, type, kinase) else: ignored_modification_types.add(type) elif elem.tag == self.NS+"entry": seq_elm = elem.find(self.NS+"sequence") current_element.set_sequence(seq_elm.text) elements.append(current_element) elem.clear() root.clear() print "Ignored mofications: ", ignored_modification_types return elements class UniprotXMLElement(object): def __init__(self): self.ids = [] self.taxid = None self.phosphosites = [] self.sequence = None def add_id(self, id): self.ids.append(id) def set_tax(self, taxid):
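# The iterparse/clear() idiom used in parse_ids() above keeps memory flat on
# multi-gigabyte UniProt dumps. A standalone sketch of the same pattern on a
# toy document (illustrative only, not part of the parser module):
from xml.etree.cElementTree import iterparse
from StringIO import StringIO

toy_doc = StringIO("<root><entry><accession>P1</accession></entry>"
                   "<entry><accession>P2</accession></entry></root>")
toy_ids = []
for event, elem in iterparse(toy_doc):
    if elem.tag == "accession":
        toy_ids.append(elem.text)
    elem.clear()  # free the parsed element to keep memory usage flat
print toy_ids  # ['P1', 'P2']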
"""Add autoincrement Revision ID: 73b63ad41d3 Revises: 331f2c45f5a Create Date: 2017-07-25 17:09:5
5.204538 """ # revision identifiers, used by Alembic. revision = '73b63ad41d3' down_revision = '331f2c45f5a' from alembic import op from sqlalchemy import Integer import sqlalchemy as sa def upgrade(): op.alter_column("RepositoryApp2languages", "id", existing_type=Integer, autoincrement=True, nullable=False)
def downgrade(): pass
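# Editor's sketch: downgrade() above is a no-op. A symmetric reversal would
# look like the function below; whether flipping autoincrement back is safe
# depends on the database backend, so treat this as an assumption rather
# than part of the original migration.
def downgrade_sketch():
    op.alter_column("RepositoryApp2languages", "id",
                    existing_type=Integer,
                    autoincrement=False,
                    nullable=False)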
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for UpdateDnsAuthorization # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-certificate-manager # [START certificatemanager_v1_generated_CertificateManager_UpdateDnsAuthorization_async] from google.cloud import certificate_manager_v1 async def sample_update_dns_authorization(): # Crea
te a client client = certificate_manager_v1.CertificateManagerAsyncClient() # Initialize request argument(s) dns_authorization = certificate_manager_v1.DnsAuthorization() dns_authorization.domain = "domain_value" request = certificate_manager_v1.UpdateDnsAuthorizationRequest( dns_authorization=dns_authorization,
)

    # Make the request (the async client's method must be awaited to obtain
    # the long-running operation object)
    operation = await client.update_dns_authorization(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)

# [END certificatemanager_v1_generated_CertificateManager_UpdateDnsAuthorization_async]
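# Editor's note: the generated coroutine still needs an event loop to run.
# A minimal driver using the standard asyncio entry point (this block is an
# addition for illustration, not generated code):
import asyncio

if __name__ == "__main__":
    asyncio.run(sample_update_dns_authorization())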
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from HomeHandler import HomeHandler
from LoginHandler import LoginHandler
from LogoutHandler import LogoutHandler
from ArchivesHandler import ArchivesHandler
from CategoryHandler import CategoryHandler
from TagHandler import TagHandler
from PageHandler import PageHandler
from SearchHandler import SearchHandler
from AdminHome import AdminHome
from ListPost import ListPost
from EditPost import EditPost
from ListComment import ListComment
from ListTag import ListTag
from ListCategory import ListCategory
from ListHtml import ListHtml

handlers = [
    (r"/", HomeHandler),
    (r"/login", LoginHandler),
    (r"/logout", LogoutHandler),
    (r"/archives/([\d]*)", ArchivesHandler),
    (r"/category", CategoryHandler),
    (r"/tag", TagHandler),
    (r"/page", PageHandler),
    (r"/search", SearchHandler),
    (r"/admin/", AdminHome),
    (r"/list/post", ListPost),
    (r"/edit/post", EditPost),
    (r"/list/comment", ListComment),
    (r"/list/tag", ListTag),
    (r"/list/category", ListCategory),
    (r"/list/html", ListHtml),
]
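# Editor's sketch: a routing table like this is normally handed straight to
# tornado.web.Application. The port and the absence of settings below are
# assumptions for illustration, not taken from this project.
import tornado.ioloop
import tornado.web

if __name__ == "__main__":
    application = tornado.web.Application(handlers)
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()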
from __future__ import absolute_import from collections import defaultdict from django.db import transaction from django.utils.translation import ugettext as _ from zerver.lib.exceptions import JsonableError from zerver.models import UserProfile, Realm, UserGroupMembership, UserGroup from typing import Dict, Iterable, List, Tuple, Any def access_user_group_by_id(user_group_id: int, user_profile: UserProfile) -> UserGroup: try: user_group = UserGroup.objects.get(id=user_group_id, realm=user_profile.r
ealm) group_member_ids = get_user_group_members(user_group) msg = _("Only group members and organization administrators can administer this group.") if (not user_profile.is_realm_admin and user_profile.id not in group_member_ids): raise JsonableError(msg) except UserGroup.DoesNotExist: raise JsonableError(_("Invalid user group")) return user_group def user_groups_in_realm(realm: Realm) -> List[UserGroup]:
    user_groups = UserGroup.objects.filter(realm=realm)
    return list(user_groups)

def user_groups_in_realm_serialized(realm: Realm) -> List[Dict[str, Any]]:
    """This function is used in do_events_register code path so this code
    should be performant.  We need to do 2 database queries because
    Django's ORM doesn't properly support the left join between
    UserGroup and UserGroupMembership that we need.
    """
    realm_groups = UserGroup.objects.filter(realm=realm)
    group_dicts = {}  # type: Dict[int, Any]
    for user_group in realm_groups:
        group_dicts[user_group.id] = dict(
            id=user_group.id,
            name=user_group.name,
            description=user_group.description,
            members=[],
        )

    membership = UserGroupMembership.objects.filter(user_group__realm=realm).values_list(
        'user_group_id', 'user_profile_id')
    for (user_group_id, user_profile_id) in membership:
        group_dicts[user_group_id]['members'].append(user_profile_id)
    for group_dict in group_dicts.values():
        group_dict['members'] = sorted(group_dict['members'])

    return sorted(group_dicts.values(), key=lambda group_dict: group_dict['id'])

def get_user_groups(user_profile: UserProfile) -> List[UserGroup]:
    return list(user_profile.usergroup_set.all())

def check_add_user_to_user_group(user_profile: UserProfile, user_group: UserGroup) -> bool:
    member_obj, created = UserGroupMembership.objects.get_or_create(
        user_group=user_group, user_profile=user_profile)
    return created

def remove_user_from_user_group(user_profile: UserProfile, user_group: UserGroup) -> int:
    num_deleted, _ = UserGroupMembership.objects.filter(
        user_profile=user_profile, user_group=user_group).delete()
    return num_deleted

def check_remove_user_from_user_group(user_profile: UserProfile, user_group: UserGroup) -> bool:
    try:
        num_deleted = remove_user_from_user_group(user_profile, user_group)
        return bool(num_deleted)
    except Exception:
        return False

def create_user_group(name: str, members: List[UserProfile], realm: Realm,
                      description: str='') -> UserGroup:
    with transaction.atomic():
        user_group = UserGroup.objects.create(name=name, realm=realm,
                                              description=description)
        UserGroupMembership.objects.bulk_create([
            UserGroupMembership(user_profile=member, user_group=user_group)
            for member in members
        ])
        return user_group

def get_user_group_members(user_group: UserGroup) -> List[int]:
    members = UserGroupMembership.objects.filter(user_group=user_group)
    return [member.user_profile.id for member in members]

def get_memberships_of_users(user_group: UserGroup, members: List[UserProfile]) -> List[int]:
    return list(UserGroupMembership.objects.filter(
        user_group=user_group,
        user_profile__in=members).values_list('user_profile_id', flat=True))
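# Editor's sketch of how the helpers above compose. `realm`, `hamlet` and
# `cordelia` are placeholder objects; running this requires a configured
# Django settings module and database.
group = create_user_group('backend', [hamlet], realm,
                          description='Backend reviewers')
check_add_user_to_user_group(cordelia, group)
assert cordelia.id in get_user_group_members(group)
print(user_groups_in_realm_serialized(realm))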
from bing_search_api import BingSearchAPI my_key = "MEL5FOrb1H5G1E78YY8N5mkfcvUK2hNBYsZl1aAEEbE" def query(query_string): b
ing = BingSearchAPI(my_key) params = {'ImageFilters':'"Face:Face"', '$format': 'json', '$top': 10, '$skip': 0} results = bing.search('web',query_string,pa
rams).json() # requests 1.0+ return [result['Url'] for result in results['d']['results'][0]['Web']] if __name__ == "__main__": query_string = "Your Query" print query(query_string)
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migr
ations, models class Migration(migrations.Migration): dependencies = [ ('editorial', '0062_auto_20171202_1413'), ] operations = [ migrations.AddField( model_name='assignment', name='complete', field=mode
ls.BooleanField(default=False, help_text=b'Is the assignment complete?'), ), ]
.read_gld_service(gld_bean)
        return "1"

    # "Set to draft" button
    @http.route('/WechatGLD/gld_state_draft', type='http', auth="public", csrf=False)
    def gld_state_draft(self, wechat_gldid, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        if gld_bean:
            request.env['syt.oa.gld'].sudo(userid).gld_state_draft_service(gld_bean)
            return "1"
        else:
            return "2"

    # "Not within my approval scope" button
    @http.route('/WechatGLD/waiver', type='http', auth="public", csrf=False)
    def waiver(self, wechat_gldid, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        employee = request.env['hr.employee'].sudo().search([('user_id', '=', int(userid))])
        if gld_bean:
            request.env['syt.oa.gld'].sudo(userid).waiver_service(gld_bean, userid, employee)
            return "1"
        else:
            return "2"

    # Render the "add approver" page
    @http.route('/WechatGLD/view_appr', type='http', auth="public", csrf=False)
    def view_appr(self, no, name, userid):
        values = {"no": no, "name": name, "userid": userid}
        return request.render('ToproERP_WeChat_GLD.view_appr', values)

    # Render the "view CC recipients" page
    @http.route('/WechatGLD/select_appr_copy_user', type='http', auth="public", csrf=False)
    def select_appr_copy_user(self, no):
        values = {"no": no}
        gld = request.env['syt.oa.gld'].sudo().search([('name', '=', no)])
        return request.render('ToproERP_WeChat_GLD.select_appr_copy_user',
                              {'values': values, 'people': gld.copy_users})

    # Get all CC recipients of the current document
    @http.route('/WechatGLD/get_copy_user', type='http', auth="public", csrf=False)
    def get_copy_user(self, wechat_gldid):
        gld = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        temp_list = []
        if gld:
            for copy_user in gld.copy_users:
                # request.env['hr.employee'].search([('id', '=', int(employee_id))])
                temp_item = {}
                temp_item['name'] = copy_user.name
                temp_item['company_name'] = copy_user.department_id.company_id.name
                temp_item['dept'] = copy_user.department_id.name
                temp_item['job_name'] = copy_user.job_id.name
                image = '/web/binary/image?model=hr.employee&field=image&id=' + str(copy_user.id) + '&resize='
                temp_item['image'] = image
                temp_list.append(temp_item)
        return JSONEncoder().encode(temp_list)

    # Add an approver / CC recipient via a button
    @http.route('/WechatGLD/add_approver_service', type='http', auth="public", csrf=False)
    def add_approver_service(self, wechat_gldid, employee_id, name, userid):
        gld_bean = request.env['syt.oa.gld'].sudo().search([('name', '=', wechat_gldid)])
        employee = request.env['hr.employee'].sudo().search([('id', '=', int(employee_id))])
        request.uid = userid
        if name == u"添加抄送人":
            result = request.env['syt.oa.gld.add.peoper.wizard'].sudo(userid).add_copy_peoper_service(gld_bean, employee, '', 2)
        elif name == u"添加审批人":
            result = request.env['syt.oa.gld.add.approver.wizard'].sudo(userid).add_approver_service(gld_bean, employee, 2)
        if result == "2":
            return "2"
        elif result == "3":
            return "3"
        else:
            return "1"

    @http.route('/WechatGLD/get_signature', type='http', auth="public", csrf=False)
    def get_signature(self, url):
        '''
        Fetch the WeChat jsapi ticket and cache it.
        :return:
        '''
        cookie = Cookie.SimpleCookie()
        # access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wx0046935c06f7c27e&corpsecret=fLuTp-KCwaG-HAPcsKZch0xNkNV2ahjMPmi1S4F_LnlP8rkJmsx7jVc931ljr46A'
        access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wxc1317b61e7e122aa&corpsecret=EGjHS5l3ee0gBSvr29zgZN2HqG4r2tPbtr-LBpRqgoEC-4EqQrvPqQQGXrc1QxpH'
        request_ = urllib2.Request(access_token_url)
        opener = urllib2.build_opener()
        conn = opener.open(request_)
        access_token_list = conn.read()
        access_token_list = json.loads(access_token_list)
        if len(cookie) == 0:
            cookie["access_token"] = access_token_list["access_token"]
            request.session['access_token'] = access_token_list["access_token"]
        if len(cookie) > 0:
            cookie_ticket = Cookie.SimpleCookie()
            ticket_url = 'https://qyapi.weixin.qq.com/cgi-bin/get_jsapi_ticket?access_token=' + cookie["access_token"].value
            request_ = urllib2.Request(ticket_url)
            opener = urllib2.build_opener()
            conn = opener.open(request_)
            ticket_list = conn.read()
            ticket_list = json.loads(ticket_list)
            if len(cookie_ticket) == 0:
                cookie_ticket["ticket"] = ticket_list["ticket"]
        ret_list = []
        ret = {}
        ret["nonceStr"] = self.__create_nonce_str()  # create a random nonce string
        ret["jsapi_ticket"] = cookie_ticket["ticket"].value
        ret["timestamp"] = self.__create_timestamp()  # create a timestamp
        ret["url"] = url
        signature = self.sign(ret)
        ret["signature"] = signature
        ret_list.append(ret)
        return JSONEncoder().encode(ret_list)

    def __create_nonce_str(self):
        '''
        Create the random string nonceStr.
        :return:
        '''
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))

    def __create_timestamp(self):
        '''
        Create the timestamp.
        :return:
        '''
        return int(time.time())

    def sign(self, ret):
        '''
        Return the hashed signature.
        :return:
        '''
        string = '&'.join(['%s=%s' % (key.lower(), ret[key]) for key in sorted(ret)])
        signature = hashlib.sha1(string).hexdigest()
        return signature

    def _compute_checksum(self, bin_data):
        """ Compute the checksum for the given data.
            :param bin_data: data in its binary form
        """
        # an empty file has a checksum too (for caching)
        return hashlib.sha1(bin_data or '').hexdigest()

    @http.route('/WechatGLD/downloadImage', type='http', auth="public", csrf=False)
    def downloadImage(self, media_id):
        cookie = Cookie.SimpleCookie()
        # access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wx0046935c06f7c27e&corpsecret=fLuTp-KCwaG-HAPcsKZch0xNkNV2ahjMPmi1S4F_LnlP8rkJmsx7jVc931ljr46A'
        access_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=wxc1317b61e7e122aa&corpsecret=EGjHS5l3ee0gBSvr29zgZN2HqG4r2tPbtr-LBpRqgoEC-4EqQrvPqQQGXrc1QxpH'
        access_token_request = urllib2.Request(access_token_url)
        opener = urllib2.build_opener()
        conn = opener.open(access_token_request)
        access_token_list = conn.read()
        access_token_list = json.loads(access_token_list)
        if len(cookie) == 0:
            cookie["access_token"] = access_token_list["access_token"]
        downloadImage_url = 'https://qyapi.weixin.qq.com/cgi-bin/media/get?access_token=' + cookie["access_token"].value + '&media_id=' + media_id + ''
        # wechat = WeChatEnterprise(agentid=1)
        # file = wechat.get_media(media_id)
        wechat = request.env['wechat.enterprise.config'].get_wechat()
        file = wechat.get_media(media_id)
        url = downloadImage_url
        f = urllib2.urlopen(url)
        attachment = request.env['ir.attachment']
        verification_code = random.randint(1000, 9999)  # 4-digit random code
        vals = {}
        vals["db_datas"] = file
        vals['datas'] = base64.encodestring(f.read())
        vals['name'] = str(verification_code) + ".jpg"
        vals['datas_fname'] = str(verification_code) + ".jpg"
#!/usr/bin/env python # ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- '''Demonstrates how to handle a platform-specific event not defined in pyglet by subclassing Window. This is not for the faint-hearted! A message will be printed to stdout when the following events are caught: - On Mac OS X, the window drag region is clicked. - On Windows, the display resolution is changed. - On Linux, the window properties are changed. ''' import pyglet # Check for Carbon (OS X) try: from pyglet.window.carbon import * _have_carbon = True except ImportError: _have_carbon = False # Check for Win32 try: from pyglet.window.win32 import * from pyglet.window.win32.constants import * _have_win32 = True except ImportError: _have_win32 = False # Check for Xlib (Linux) try: from p
yglet.window.xlib import * _have_xlib = True except ImportError: _have_xlib = False # Subclass Window class MyWindow(pyglet.window.Window): if _have_carbon: @CarbonEventHandler(kEventClassWindow, kEventWindowClickDragRgn) def _on_window_click_drag_rgn(self, next_handler, event, data): print 'Clicked drag rgn.'
carbon.CallNextEventHandler(next_handler, event) return noErr if _have_win32: @Win32EventHandler(WM_DISPLAYCHANGE) def _on_window_display_change(self, msg, lParam, wParam): print 'Display resolution changed.' return 0 if _have_xlib: @XlibEventHandler(xlib.PropertyNotify) def _on_window_property_notify(self, event): print 'Property notify.' if __name__ == '__main__': window = MyWindow() pyglet.app.run()
e(self, volume): """Create a volume.""" opts = huawei_utils.get_volume_params(volume) smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts) params = huawei_utils.get_lun_params(self.xml_file_path, smartx_opts) pool_name = volume_utils.extract_host(volume['host'], level='pool') pools = self.restclient.find_all_pools() pool_info = self.restclient.find_pool_info(pool_name, pools) if not pool_info: msg = (_('Error in getting pool information for the pool: %s.') % pool_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(volume['id']) volume_description = volume['name'] volume_size = huawei_utils.get_volume_size(volume) LOG.info(_LI( 'Create volume: %(volume)s, size: %(size)s.'), {'volume': volume_name, 'size': volume_size}) params['pool_id'] = pool_info['ID'] params['volume_size'] = volume_size params['volume_description'] = volume_description # Prepare LUN parameters. lun_param = huawei_utils.init_lun_parameters(volume_name, params) # Create LUN on the array. lun_info = self.restclient.create_volume(lun_param) lun_id = lun_info['ID'] try: qos = huawei_utils.get_volume_qos(volume) if qos: smart_qos = smartx.SmartQos(self.restclient) smart_qos.create_qos(qos, lun_id) smartpartition = smartx.SmartPartition(self.restclient) smartpartition.add(opts, lun_id) smartcache = smartx.SmartCache(self.restclient) smartcache.add(opts, lun_id) except Exception as err: self._delete_lun_with_check(lun_id) raise exception.InvalidInput( reason=_('Create volume error. Because %s.') % err) return {'provider_location': lun_info['ID'], 'ID': lun_id, 'lun_info': lun_info} @utils.synchronized('huawei', external=True) def delete_volume(self, volume): """Delete a volume. Three steps: Firstly, remove associate from lungroup. Secondly, remove associate from QoS policy. Thirdly, remove the lun. 
""" name = huawei_utils.encode_name(volume['id']) lun_id = volume.get('provider_location') LOG.info(_LI('Delete volume: %(name)s, array lun id: %(lun_id)s.'), {'name': name, 'lun_id': lun_id},) if lun_id: if self.restclient.check_lun_exist(lun_id): qos_id = self.restclient.get_qosid_by_lunid(lun_id) if qos_id: self.remove_qos_lun(lun_id, qos_id) self.restclient.delete_lun(lun_id) else: LOG.warning(_LW("Can't find lun %s on the array."), lun_id) return False return True def remove_qos_lun(self, lun_id, qos_id): lun_list = self.restclient.get_lun_list_in_qos(qos_id) lun_count = len(lun_list) if lun_count <= 1: qos = smartx.SmartQos(self.restclient) qos.delete_qos(qos_id) else: self.restclient.remove_lun_from_qos(lun_id, lun_list, qos_id) def _delete_lun_with_check(self, lun_id): if lun_id: if self.restclient.check_lun_exist(lun_id): qos_id = self.restclient.get_qosid_by_lunid(lun_id) if qos_id: self.remove_qos_lun(lun_id, qos_id) self.restclient.delete_lun(lun_id) def _is_lun_migration_complete(self, src_id, dst_id): result = self.restclient.get_lun_migration_task() found_migration_task = False if 'data' in result: for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): found_migration_task = True if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: return True if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: err_msg = (_('Lun migration error.')) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) if not found_migration_task: err_msg = (_("Cannot find migration task.")) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) return False def _is_lun_migration_exist(self, src_id, dst_id): try: result = self.restclient.get_lun_migration_task() except Exception: LOG.error(_LE("Get LUN migration error.")) return False if 'data' in result: for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): return True return False def _migrate_lun(self, src_id, dst_id): try: self.restclient.create_lun_migration(src_id, dst_id) def _is_lun_migration_complete(): return self._is_lun_migration_complete(src_id, dst_id) wait_interval = constants.MIGRATION_WAIT_INTERVAL huawei_utils.wait_for_condition(self.xml_file_path, _is_lun_migration_complete, wait_interval) # Clean up if migration failed. except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) finally: if self._is_lun_migration_exist(src_id, dst_id): self.restclient.delete_lun_migration(src_id, dst_id) self._delete_lun_with_check(dst_id) LOG.debug("Migrate lun %s successfully.", src_id) return True def _wait_volume_ready(self, lun_id): event_type = 'LUNReadyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path, event_type) def _volume_ready(): result = self.restclient.get_lun
_info(lun_id)
            if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
                    and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
                return True
            return False

        huawei_utils.wait_for_condition(self.xml_file_path,
                                        _volume_ready,
                                        wait_interval,
                                        wait_interval * 10)

    def _get_original_status(self, volume):
        if not volume['volume_attachment']:
            return 'available'
        else:
            return 'in-use'

    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status=None):
        original_name = huawei_utils.encode_name(volume['id'])
        current_name = huawei_utils.encode_name(new_volume['id'])
        lun_id = self.restclient.get_volume_by_name(current_name)
        try:
            self.restclient.rename_lun(lun_id, original_name)
        except exception.VolumeBackendAPIException:
            LOG.error(_LE('Unable to rename lun %s on array.'), current_name)
            return {'_name_id': new_volume['_name_id'] or new_volume['id']}

        LOG.debug("Rename lun from %(current_name)s to %(original_name)s "
                  "successfully.",
                  {'current_name': current_name,
                   'original_name': original_name})

        model_update = {'_name_id': None}

        return model_update

    def migrate_volume(self, ctxt, volume, host, new_type=None):
        """Migrate a volume within the same array."""
        return self._migrate_volume(volume, host, new_type)
atus_code == 200 def test_reminderState_with_embed_20(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&embed=eventBooking,seasonBooking")) assert filter_response.status_code == 200 def test_reminderState_with_embed_21(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&embed=eventBooking,seasonBooking,transcodeBooking")) assert filter_response.status_code == 200 def test_reminderState_with_embed_22(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")) assert filter_response.status_code == 200 def test_reminderState_with_embed_23(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")) assert filter_response.status_code == 200 def test_reminderState_with_bookingType_24(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&bookingType=manual")) assert filter_response.status_code == 200 def test_reminderState_with_bookingType_25(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&bookingType=event")) assert filter_response.status_code == 200 def test_reminderState_with_bookingType_26(self): filter_response = call_ref_url("get", make_booking_filter_url("reminderState=notReminded&bookingType=manual,event")) assert filter_response.status_code == 200 class TestdownloadStateMajor: def test_downloadState_with_recordingState_1(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&recordingState=notStarted")) assert filter_response.status_code == 200 def test_downloadState_with_recordingState_2(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&recordingState=inProgress")) assert filter_response.status_code == 200 def test_downloadState_with_recordingState_3(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notSt
arted&recordingState=notStarted,inProgress")) assert filter_response.status_code == 200 def test_downloadState_with_recordingContentState_4(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&recordingContentState=partial")) assert filter_response.status_code == 200 def test_downloadState_with_recordingContentState_5(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&recordingCo
ntentState=complete")) assert filter_response.status_code == 200 def test_downloadState_with_recordingContentState_6(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&recordingContentState=partial,complete")) assert filter_response.status_code == 200 def test_downloadState_with_downloadContentState_7(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&downloadContentState=partial")) assert filter_response.status_code == 200 def test_downloadState_with_downloadContentState_8(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&downloadContentState=complete")) assert filter_response.status_code == 200 def test_downloadState_with_downloadContentState_9(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&downloadContentState=partial,complete")) assert filter_response.status_code == 200 def test_downloadState_with_embed_10(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=eventBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_11(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=seasonBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_12(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=transcodeBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_13(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=transcodeSeasonBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_14(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=reminderBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_15(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=eventBooking,seasonBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_16(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_17(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking")) assert filter_response.status_code == 200 def test_downloadState_with_embed_18(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&embed=eventBooking,seasonBooking,transcodeBooking,transcodeSeasonBooking,reminderBooking")) assert filter_response.status_code == 200 def test_downloadState_with_bookingType_19(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&bookingType=manual")) assert filter_response.status_code == 200 def test_downloadState_with_bookingType_20(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&bookingType=event")) assert filter_response.status_code == 200 def test_downloadState_with_bookingType_21(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=notStarted&bookingType=manual,event")) assert filter_response.status_code == 200 def 
test_downloadState_with_recordingState_22(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&recordingState=notStarted")) assert filter_response.status_code == 200 def test_downloadState_with_recordingState_23(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&recordingState=inProgress")) assert filter_response.status_code == 200 def test_downloadState_with_recordingState_24(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&recordingState=notStarted,inProgress")) assert filter_response.status_code == 200 def test_downloadState_with_recordingContentState_25(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&recordingContentState=partial")) assert filter_response.status_code == 200 def test_downloadState_with_recordingContentState_26(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&recordingContentState=complete")) assert filter_response.status_code == 200 def test_downloadState_with_recordingContentState_27(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&recordingContentState=partial,complete")) assert filter_response.status_code == 200 def test_downloadState_with_downloadContentState_28(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&downloadContentState=partial")) assert filter_response.status_code == 200 def test_downloadState_with_downloadContentState_29(self): filter_response = call_ref_url("get", make_booking_filter_url("downloadState=inProgress&downloadContentState=complete")) assert filter_response.status_code == 200 def test_downloadState
tions to export object detection inference graph.""" import logging import os import tensorflow as tf from tensorflow.python import pywrap_tensorflow from tensorflow.python.client import session from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.platform import gfile from tensorflow.python.saved_model import signature_constants from tensorflow.python.training import saver as saver_lib from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder slim = tf.contrib.slim # TODO: Replace with freeze_graph.freeze_graph_with_def_protos when # newer version of Tensorflow becomes more common. def freeze_graph_with_def_protos( input_graph_def, input_saver_def, input_checkpoint, output_node_names, restore_op_name, filename_tensor_name, clear_devices, initializer_nodes, variable_names_blacklist=''): """Converts all variables in a graph and checkpoint into constants.""" del restore_op_name, filename_tensor_name # Unused by updated loading code. # 'input_checkpoint' may be a prefix if we're using Saver V2 format if not saver_lib.checkpoint_exists(input_checkpoint): raise ValueError( 'Input checkpoint "' + input_checkpoint + '" does not exist!') if not output_node_names: raise ValueError( 'You must supply the name of a node to --output_node_names.') # Remove all the explicit device specifications for this node. This helps to # make the graph more portable. if clear_devices: for node in input_graph_def.node: node.device = '' _ = importer.import_graph_def(input_graph_def, name='') with session.Session() as sess: if input_saver_def: saver = saver_lib.Saver(saver_def=input_saver_def) saver.restore(sess, input_checkpoint) else: var_list = {} reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint) var_to_shape_map = reader.get_variable_to_shape_map() for key in var_to_shape_map: try: tensor = sess.graph.get_tensor_by_name(key + ':0') except KeyError: # This tensor doesn't exist in the graph (for example it's # 'global_step' or a similar housekeeping element) so skip it. continue var_list[key] = tensor saver = saver_lib.Saver(var_list=var_list) saver.restore(sess, input_checkpoint) if initializer_nodes: sess.run(initializer_nodes) variable_names_blacklist = (variable_names_blacklist.split(',') if variable_names_blacklist else None) output_graph_def = graph_util.convert_variables_to_constants( sess, input_graph_def, output_node_names.split(','), variable_names_blacklist=variable_names_blacklist) return output_graph_def def get_frozen_graph_def(inference_graph_def, use_moving_averages, input_checkpoint, output_node_names): """Freezes all variables in a graph definition.""" saver = None if use_moving_averages: variable_averages = tf.train.ExponentialMovingAverage(0.0) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) else: saver = tf.train.Saver() frozen_graph_def = freeze_graph_with_def_protos( input_graph_def=inference_graph_def, input_saver_def=saver.as_saver_def(), input_checkpoint=input_checkpoint, output_node_names=output_node_names, restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', clear_devices=True, initializer_nodes='') return frozen_graph_def # TODO: Support batch tf example inputs. 
def _tf_example_input_placeholder(): tf_example_placeholder = tf.placeholder( tf.string, shape=[], name='tf_example') tensor_dict = tf_example_decoder.TfExampleDecoder().decode( tf_example_placeholder) image = tensor_dict[fields.InputDataFields.image] return tf.expand_dims(image, axis=0) def _image_tensor_input_placeholder(): return tf.placeholder(dtype=tf.uint8, shape=(1, None, None, 3), name='image_tensor') def _encoded_image_string_tensor_input_placeholder(): image_str = tf.placeholder(dtype=tf.string, shape=[], name='encoded_image_string_tensor') image_tensor = tf.image.decode_image(image_str, channels=3) image_tensor.set_shape((None, None, 3)) return tf.expand_dims(image_tensor, axis=0) input_placeholder_fn_map = { 'image_tensor': _image_tensor_input_placeholder, 'encoded_image_string_tensor': _encoded_image_string_tensor_input_placeholder, 'tf_example': _tf_example_input_placeholder, } def _add_output_tensor_nodes(postprocessed_tensors): """Adds output nodes for detection boxes and scores. Adds the following nodes for output tensors - * num_detections: float32 tensor of shape [batch_size]. * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4] containing detected boxes. * detection_scores: float32 tensor of shape [batch_size, num_boxes] containing scores for the detected boxes. * detection_classes: float32 tensor of shape [batch_size, num_boxes] containing class predictions for the detected boxes. * detection_masks: (Optional) float32 tensor of shape [batch_size, num_boxes, mask_height, mask_width] containing masks for each detection box. Args: postprocessed_tensors: a dictionary containing the following fields 'detection_boxes': [batch, max_detections, 4] 'detection_scores': [batch, max_detections] 'detection_classes': [batch, max_detections] 'detection_masks': [batch, max_detections, mask_height, mask_width] (optional). 'num_detections': [batch] Returns: A tensor dict containing the added output tensor nodes. """ label_id_offset = 1 boxes = postprocessed_tensors.get('detection_boxes') scores = postprocessed_tensors.get('detection_scores') classes = postprocessed_tensors.get('detection_classes') + label_id_offset masks = postprocessed_tensors.get('detection_masks') num_detections = postprocessed_tensors.get('num_detections') outputs = {} outputs['detection_boxes'] = tf.identity(boxes, name='detection_boxes') outputs['detection_scores'] = tf.identity(scores, name='detection_scores') outputs['detection_classes'] = tf.identity(classes, name='detection_classes') outputs['num_detections'] = tf.identity(num_detections, name='num_detections') if masks is not None: outputs['detection_masks'] = tf.identity(masks, name='detection_masks') return outputs def _write_inference_graph(inference_graph_path, checkpoint_path=None, use_moving_averages=False, output_node_names=(
'num_detections,detection_scores,' 'detection_boxes,detection_classes')): """Writes inference graph to disk with the option to bake in weights. If checkpoint_path is not None bakes the weights into the graph thereby eliminating the need of checkpoint files during inference. If the model was trained with moving averages, setting use_moving_
averages to true restores the moving averages, otherwise the original set of
  variables is restored.

  Args:
    inference_graph_path: Path to write inference graph.
    checkpoint_path: Optional path to the checkpoint file.
    use_moving_averages: Whether to export the original or the moving averages
      of the trainable variables from the checkpoint.
    output_node_names: Output tensor names, defaults are: num_detections,
      detection_scores, detection_boxes, detection_classes.
  """
  inference_graph_def = tf.get_default_graph().as_graph_def()
  if checkpoint_path:
    output_graph_def = get_frozen_graph_def(
        inference_graph_def=inference_graph_def,
        use_moving_averages=use_moving_averages,
        input_checkpoint=checkpoint_path,
        output_node_names=output_node_names)
    with gfile.GFile(inference_graph_path, 'wb') as f:
      f.write(output_graph_def.SerializeToString())
  else:
    with gfile.GFile(inference_graph_path, 'wb') as f:
      f.write(inference_graph_def.SerializeToString())
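# Editor's sketch: elsewhere in the exporter, input_placeholder_fn_map above
# is consulted by input type name. The error handling shown here is an
# assumption for illustration, not code from this module.
input_type = 'image_tensor'  # one of the three supported keys
if input_type not in input_placeholder_fn_map:
  raise ValueError('Unknown input type: {}'.format(input_type))
placeholder_tensor = input_placeholder_fn_map[input_type]()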
import os
import glob

#####################################################
###### Init the files ###############################
#####################################################
# Remove last run's output, then reopen the thirty formatted-output files
# (a0-a9, n0-n9, v0-v9) in append mode; same behavior as listing each file
# by hand.
for prefix in ("a", "n", "v"):
    for i in range(10):
        os.remove("%s%d.txt" % (prefix, i))

format_a = [open("a%d.txt" % i, "a") for i in range(10)]
format_n = [open("n%d.txt" % i, "a") for i in range(10)]
format_v = [open("v%d.txt" % i, "a") for i in range(10)]

the_attack_files = glob.glob("../Basic_Attack/*.txt")
the_normal_files = glob.glob("../Normal_Data/*.txt")
the_vali_files = glob.glob("../Vali_Data/*.txt")

#####################################################
######## Format the files ###########################
#####################################################
attack_words = []
normal_words = []
vali_words = []

#####################################################
######## Read in the sequences ######################
######### separate them into 2D arrays ##############
#####################################################
for f in the_attack_files:
    e = open(f, "r+")
    attack_words.extend([e.read().split()])
    e.close()

for f in the_normal_files:
e = open(f,"r+") normal_words.extend([e.read().split()]) e.close()
for f in the_vali_files: e = open(f,"r+") vali_words.extend([e.read().split()]) e.close() files_a = len(attack_words)/10 files_n = len(normal_words)/10 files_v = len(vali_words)/10 print("Normal Words: " + str(len(normal_words))) print("Average normal words per formatted file: " + str(files_n)) print("Attack Words: " + str(len(attack_words))) print("Average attack words per formatted file: " + str(files_a)) print("Validation Words: " + str(len(vali_words))) print("Average validation words per formatted file: " + str(files_v)) input_n = raw_input("Please input a value for n: ") print("Performing formatting with " + str(input_n) + " grams...") n = int(input_n) y = 0 index = 0 to_write = format_n[index] for norm in normal_words: for x in range(0,len(norm) - (n-1)): for form in range(0, n): if(form < n-1): to_write.write(str(norm[x+form]) + " ") elif(form == n-1): to_write.write(str(norm[x+form]) + " 0\n") to_write.write("new\n") y += 1 if(y % files_n == 0 and index < 9): print( str(y) + " instances in norm_block...") #print("X: " + str(y)) #print("Ending: " + str(index) + "\n Starting: " + str(index+1)) to_write.close() index = index + 1 to_write = format_n[index] y = 0 index = 0 to_write = format_a[index] for norm in attack_words: for x in range(0,len(norm) - (n-1)): for form in range(0, n): if(form < n-1): to_write.write(str(norm[x+form]) + " ") elif(form == n-1): to_write.write(str(norm[x+form]) + " 1\n") to_write.write("new\n") y += 1 if(y % files_a == 0 and index < 9): print( str(y) + " instances in att_block...") #print("Ending: " + str(index) + "\n Starting: " + str(index+1)) to_write.close() index = index + 1 to_write = format_a[index] y = 0 index = 0 to_write = format_v[index] for norm in vali_words: for x in range(0,len(norm) - (n-1)): for form in range(0,n): if(form < n-1): to_write.write(str(norm[x+form]) + " ") elif(form == n-1): to_write.write(str(norm[x+form]) + " 0\n") to_write.write("new\n") y += 1 if(y % files_v == 0 and index < 9): print( str(y) + " instances in vali_block...") #print("Ending: " + str(index) + "\n Starting: " + str(index+1)) to_write.close() index = index + 1 to_write = format_v[index] ##################################################### ########Generate the n-gram########################## #########and write that to the file################## ##################################################### #n = 3 #for norm in normal_words: # for x in range(0,len(norm)-(n-1)): # file__.write(str(norm[x]) + " " + str(norm[x+1]) + " " + str(norm[x+2]) + " 0\n") #for att in attack_words: # for x in range(0,len(att)-(n-1)): # file_.write(str(att[x]) + " " + str(att[x+1]) + " " + str(att[x+2]) + " 1\n") #for vali in vali_words: # for x in range(0,len(vali)-(n-1)): # file_v.write(str(vali[x]) + " " + str(vali[x+1]) + " " + str(vali[x+2]) + " 0\n") # file_v.write("new\n") print("Data Formatted...")
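# Editor's illustration of the sliding-window emission above: with n = 3,
# each window of a toy syscall trace becomes one "w1 w2 ... wn <label>" line.
toy_seq = ["open", "read", "write", "close"]
toy_n = 3
for x in range(0, len(toy_seq) - (toy_n - 1)):
    print(" ".join(toy_seq[x:x + toy_n]) + " 0")
# open read write 0
# read write close 0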
import random from django.dispatch import receiver from django.conf import settings from readthedocs.restapi.signals import footer_response from readthedocs.donate.models import SupporterPromo from readthedocs.donate.constants import INCLUDE, EXCLUDE from readthedocs.donate.utils import offer_promo PROMO_GEO_PATH = getattr(settings, 'PROMO_GEO_PATH', None) if PROMO_GEO_PATH: import geoip2.database # noqa from geoip2.errors import AddressNotFoundError # noqa geo_reader = geoip2.database.Reader(PROMO_GEO_PATH) def show_to_geo(promo, country_code): # Remove promo's that exclude this country. for filter in promo.geo_filters.all(): if filter.filter_type == INCLUDE: if country_code in filter.codes: continue else: return False if filter.filter_type == EXCLUDE: if country_code in filter.codes: return False return True def show_to_programming_language(promo, programming_language): """ Filter a promo by a specific programming language Return True if we haven't set a specific language, which means show to all languages. """ if promo.programming_language: return programming_language == promo.programming_language return True def choose_promo(promo_list): """ This is the algorithm to pick which promo to show. This takes into account how many remaining days this promo has to be shown. The algorithm is currently as such: * Take the remaining number of views each promo has today * Add them together, with each promo "assigned" to a range * Pick a random number between 1 and that total * Choose the ad whose range is in the chosen random number In the future, we should take into account the expected views for today (The number of views from this day last week) Then we can scale the "total ads sold" against that "expected views", and that will give us more spread throughout the day. """ promo_range = [] total_views_needed = 0 for promo in promo_list: promo_range.append([ total_views_needed, total_views_needed + promo.views_needed_today(), promo ]) total_views_needed += promo.views_needed_today() choice = random.randint(0, total_views_needed) for range_list in promo_range: if range_list[0] <= choice <= range_list[1]: return range_list[2] return None def get_promo(country_code, programming_language, gold_project=False, gold_user=False): """ Get a proper promo. 
Takes into account: * Gold User status * Gold Project status * Geo * Programming Language """ promo_queryset = SupporterPromo.objects.filter(live=True, display_type='doc') filtered_promos = [] for obj in promo_queryset: # Break out if we aren't meant to show to this language if obj.programming_language and not show_to_programming_language(obj, programming_language): continue # Break out if we aren't meant to show to this country if country_code and not show_to_geo(obj, country_code): continue # If we haven't bailed because of language or country, possibly show the promo filtered_promos.append(obj) promo_obj = choose_promo(filtered_promos) # Show a random house ad if we don't have anything else if not promo_obj: house_promo = SupporterPromo.objects.filter(live=True, name='house').order_by('?') if house_promo.exists(): promo_obj = house_promo.first() # Support showing a "Thank you" message for gold folks if gold_user: gold_promo = SupporterPromo.objects.filter(live=True, name='gold-user') if gold_promo.exists(): promo_obj = gold_promo.first() # Default to showing project-level thanks if it exists if gold_project: gold_promo = SupporterPromo.objects.filter(live=True, name='gold-project') if gold_promo.exists(): promo_obj = gold_promo.first() return promo_obj @receiver(footer_response) def attach_promo_data(sender, **kwargs): request = kwargs['request'] context = kwargs['context'] resp_data = kwargs['resp_data'] project = context['project'] # Bail out early if promo's are disabled. use_promo = getattr(settings, 'USE_PROMOS', True) if not use_promo: resp_data['promo'] = False return gold_user = gold_project = False promo_obj = country_code = None show_promo = project.allow_promos # The request is by a GoldUser if request.user.is_authenticated(): if request.user.gold.count() or request.user.goldonce.count(): gold_user = True # A GoldUser has mapped this project if project.gold_owners.count(): gold_project = True # Don't show gold users promos. # This will get overridden if we have specific promos for them below. if gold_user or gold_project: show_promo = False if PROMO_GEO_PATH: # Get geo information from the IP, but don't record it anywhere ip = request.META.get('REMOTE_ADDR') if ip: try: geo_response = geo_reader.city(ip) country_code = geo_response.country.iso_code except (AddressNotFoundError, ValueError): # Invalid IP country_code = None # Try to get a promo if we should be using one. if show_promo: promo_obj = get_promo( country_code=country_code, prog
ramming_language=project.programming_language, gold_project=gold_project, gold_user=gold_user, ) # If we don't have anything to show, don't show it. if not promo_obj: show_promo = False if show_promo: promo_dict = offer_promo(promo_obj=promo_obj, project=project) resp_data['promo_data'] = promo_dict # Set promo object on r
eturn JSON resp_data['promo'] = show_promo
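# Hedged usage sketch for choose_promo(): the stub below is illustrative only
# (real callers pass SupporterPromo instances); it shows how promos with more
# remaining views win the weighted-random pick proportionally more often.
#
#     from types import SimpleNamespace
#
#     def _stub(name, views):
#         return SimpleNamespace(name=name, views_needed_today=lambda: views)
#
#     picks = [choose_promo([_stub('a', 30), _stub('b', 70)]).name
#              for _ in range(1000)]
#     # picks.count('b') should land near 700.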
"""Device tracker support for OPNSense routers.""" from homeassistant.components.device_tracker import DeviceScanner from . import CONF_TRACKER_INTERFACE, OPNSENSE_DATA async def async_get_scanner(hass, config, discovery_info=None): """Configure the OPNSense device_tracker.""" interface_client = hass.data[OPNSENSE_DATA]["interfaces"] scanner = OPNSenseDeviceScanner( interface_client, hass.data[OPNSENSE_DATA][CONF_TRACKER_INTERFACE] ) return scanner class OPNSenseDeviceScanner(DeviceScanner): """This class queries a router running OPNsense.""" def __init__(self, client, interfaces): """Initialize the scanner.""" self.last_results = {} self.client = client self.interfaces = interfaces def _get_mac_addrs(self, devices): """Create dict with mac address keys from list of devices.""" out_devices = {} for device in devices: if not self.interfaces: out_devices[device["mac"]] = device elif device["intf_description"] in self.interfaces: out_devices[device["mac"]] = device return out_devices def scan_devices(self): """Scan for new devi
ces and return a list with found device IDs.""" self.update_info() return list(self.last_results) def
get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if device not in self.last_results:
            return None
        hostname = self.last_results[device].get("hostname") or None
        return hostname

    def update_info(self):
        """Ensure the information from the OPNSense router is up to date."""
        devices = self.client.get_arp()
        self.last_results = self._get_mac_addrs(devices)

    def get_extra_attributes(self, device):
        """Return the extra attrs of the given device."""
        if device not in self.last_results:
            return None
        if not (mfg := self.last_results[device].get("manufacturer")):
            return {}
        return {"manufacturer": mfg}
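# Hedged configuration sketch: the exact YAML schema belongs to the parent
# opnsense component; the keys below are assumptions based on the
# OPNSENSE_DATA / CONF_TRACKER_INTERFACE constants imported above.
#
#     opnsense:
#       url: https://10.0.0.1/api
#       api_key: !secret opnsense_api_key
#       api_secret: !secret opnsense_api_secret
#       tracker_interfaces:
#         - LAN
#
# With tracker_interfaces set, _get_mac_addrs() keeps only ARP entries whose
# intf_description matches one of the listed interface names; with it unset,
# every ARP entry is tracked.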
#!/usr/bin/env python # Convert line elements with overlapping endpoints into polylines in an # SVG file. import os import sys try: from lxml import etree except ImportError: import xml.etree.ElementTree as etree from collections import defaultdict from optparse import OptionParser SVG_NS = 'http://www.w3.org/2000/svg' START = 1 END = 2 class Line(object): def __init__(self, line_element): a = line_element.attrib self.x1 = float(a['x1']) self.y1 = float(a['y1']) self.x2 = float(a['x2']) self.y2 = float(a['y2']) self.strokeWidth = float(a['stroke-width']) def reverse(self): self.x1, self.x2 = self.x2, self.x1 self.y1, self.y2 = self.y2, self.y1 def start_hash(self): return str(self.x1) + ',' + str(self.y1) def end_hash(self): return str(self.x2) + ',' + str(self.y2) def endpoint(self, direction): if direction == START: return self.start_hash() else: return self.end_hash() def get_other_hash(self, key): h = self.start_hash() if h == key: h = self.end_hash() return h def __repr__(self): return '((%s,%s),(%s,%s),sw:%s)' % (self.x1, self.y1, self.x2, self.y2, self.strokeWidth) class EndpointHash(object): def __init__(self, lines): self.endpoints = defaultdict(list) for l in lines: self.endpoints[l.start_hash()].append(l) self.endpoints[l.end_hash()].append(l) def count_overlapping_points(self): count = 0 for key, lines in self.endpoints.iteritems(): l = len(lines) if l > 1: count += 1 return count def _del_line(self, key, line): self.endpoints[key].remove(line) if len(self.endpoints[key]) == 0: del self.endpoints[key] def remove_line(self, line): key = line.start_hash() self._del_line(key, line) self._del_line(line.get_other_hash(key), line) def pop_connected_line(self, line, key): if key in self.endpoints: line = self.endpoints[key][0] self.remove_line(line) return line else: return def parse_svg(fname): print "Parsing '%s'..." % (fname) return etree.parse(fname) def get_lines(svg): lines = [] for l in svg.getroot().iter('{%s}line' % SVG_NS): lines.append(Line(l)) return lines def align_lines(l1, l2): if ( l1.x1 == l2.x1 and l1.y1 == l2.y1 or l1.x2 == l2.x2 and l1.y2 == l2.y2): l2.reverse() def connect_lines(lines, endpoint_hash, line, direction, poly): while True: key = line.endpoint(direction) connected_line = endpoint_hash.pop_connected_line(line, key) if connected_line: if direction == START: poly.insert(0, connected_line) else: poly.append(connected_line) align_lines(line, connected_line) lines.remove(connected_line) line = connected_line else: break def find_polylines(lines, endpoint_hash): polylines = [] while lines: line = lines.pop() endpoint_hash.remove_line(line) poly = [line] connect_lines(lines, endpoint_hash, line, START, poly) connect_lines(lines, endpoint_hash, line, END, poly) polylines.append(poly) return polylines def optimize(svg): lines = get_lines(svg) print '%s line segments found' % len(lines) lines_by_width = defaultdict(list) for l in lines: lines_by_width[l.strokeWidth].append(l) del lines print '%s different stroke widths found:' % len(lines_by_width) for width, lines in lines_by_width.iteritems(): print ' strokeWidth: %s (%s lines)' % (width, len(lines)) polylines = [] for width, lines in lines_by_width.iteritems(): print 'Finding polylines (strokeWidth: %s)... 
' % width endpoint_hash = EndpointHash(lines) overlapping_points = endpoint_hash.count_overlapping_points() print (' %s line segments, %s overlapping points' % (len(lines), overlapping_points)), p = find_polylines(lines, endpoint_hash) print '-> %s polylines' % len(p) polylines += p return polylines def write_svg(polylines, outfile): print "Writing '%s'..." % outfile f = open(outfile, 'w') f.write("""<?xml version="1.0" standalone="no"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg width="100%" height="100%" xmlns="http://www.w3.org/2000/svg" version="1.1"> """) def point_to_str(x, y): return '%s,%s ' % (x, y) for p in polylines: points = [] for line in p: if not points: points.append(point_to_str(line.x1, line.y1)) points.append(point_to_str(line.x2, line.y2)) f.write('<polyline fill="none" stroke="#000" stroke-width="%s" points="%s"/>\n' % (p[0].strokeWidth, ' '.join(points))) f.write('</svg>\n') f.close() def get_filesize(fname): return os.stat(fname).st_size def print_size_stats(infile, outfile): insize = get_filesize(infile) outsize = get_filesize(outfile) print ('Original file size: %.2fKiB, new file size: %.2fKiB (%.2f)' % (insize / 1024., outsize / 1024., float(outsize) / insize * 100)) def main(): usage = 'Usage: %prog INFILE OUTFILE' parser = OptionParser(usage=usage) options, args = parser.parse_args() if len(args) < 2: parser.error('input and output files must be specified')
return 2 infile = args[0] outfile = args[1] svg = parse_svg(infile) polylines = optimize(svg) print '%s polyline(s) found in total' % len(polylines) write_svg(polylines, out
file) print_size_stats(infile, outfile) return 0 if __name__ == '__main__': try: sys.exit(main()) except KeyboardInterrupt: sys.exit(1)
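# Hedged before/after illustration (the invocation and exact float formatting
# are assumptions; only the INFILE/OUTFILE positional arguments are real):
#
#     $ python optimize_svg.py in.svg out.svg
#
# Two touching <line> elements such as
#
#     <line x1="0" y1="0" x2="1" y2="0" stroke-width="1"/>
#     <line x1="1" y1="0" x2="2" y2="0" stroke-width="1"/>
#
# share the endpoint (1, 0), so find_polylines() chains them and write_svg()
# emits a single element along the lines of
#
#     <polyline fill="none" stroke="#000" stroke-width="1.0"
#               points="0.0,0.0 1.0,0.0 2.0,0.0"/>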
lock: raise EmptyPoolError( self, "Pool reached maximum size and no more connections are allowed.", ) pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.debug("Resetting dropped connection: %s", self.host) conn.close() if getattr(conn, "auto_open", 1) == 0: # This is a proxied connection that has been mutated by # httplib._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) conn = None return conn or self._new_conn() def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. If the pool is closed, then the connection will be closed and discarded. """ try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except queue.Full: # This should never happen if self.block == True log.warning("Connection pool is full, discarding connection: %s", self.host) # Connection never got put back into the pool, close it. if conn: conn.close() def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ pass def _prepare_proxy(self, conn): # Nothing to do for HTTP connections. pass def _get_timeout(self, timeout): """ Helper that always returns a :class:`urllib3.util.Timeout` """ if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: # User passed us an int/float. This is for backwards compatibility, # can be removed later return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error if hasattr(err, "errno") and err.errno in _blocking_errnos: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 if "timed out" in str(err) or "did not complete (read)" in str( err ): # Python < 2.7.4 raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % timeout_value ) def _make_request( self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw ): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. 
try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise # conn.request() calls httplib.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. if chunked: conn.request_chunked(method, url, **httplib_request_kw) else: conn.request(method, url, **httplib_request_kw) # Reset the timeout for the recv() on the socket read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr if getattr(conn, "sock", None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching # the exception and assuming all BadStatusLine exceptions are read # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout ) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value conn.sock.settimeout(read_timeout) # Receive the response from the server try: try:
# Python 2.7, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 3 try: httplib_response = conn.getresponse()
except BaseException as e: # Remove the TypeError from the exception chain in # Python 3 (including for exceptions like SystemExit). # Otherwise it looks like a bug in the code. six.raise_from(e, None) except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. http_version = getattr(conn, "_http_vsn_str", "HTTP/?") log.debug( '%s://%s:%s "%s %s %s" %s %s', self.scheme, self.host, self.port, method, url, http_version, httplib_response.status, httplib_response.length, ) try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3 log.warning( "Failed to parse headers (url=%s): %s", self._absolute_url(url), hpe, exc_info=True, ) return httplib_response def _absolute_url(self, path): return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self): """ Close all pooled connections and disable the pool. """ if self.pool is None: return # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except queue.Empty: pass # Done. def is_same_host(self, url): """ Check if the given ``url
import os import sys here = os.path.
abspath(os.path.split(__file__)[0]) sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir)) import localpaths a
s _localpaths # noqa: F401
import sys import time import json import logging import random import tornado.options from tornado.options import define, options from tornado import gen define('srp_root',default='http://192.168.56.1') #define('srp_root',default='https://remote-staging.utorrent.com') #define('srp_root',default='https://remote.utorrent.com') define('debug',default=True) define('verbose',default=1, type=int) tornado.options.parse_command_line() if options.debug: import pdb import tornado.ioloop from falcon_api.session import Session from falcon_api.util import asyncsleep from falcon_api.classic import Client import tornado.httpclient httpclient = tornado.httpclient.AsyncHTTPClient(force_instance=True, max_clients=1) @gen.engine def test_login(): username = sys.argv[1] password = sys.argv[2] # check result.. #torrent = 'http://www.clearbits.net/get/503-control-alt-deus---made-of-fire.torrent' hash = ''.join([random.choice( list('abcdef') + map(str,range(10)) ) for _ in range(40)]) torrent = 'magnet:?xt=urn:btih:%s' % hash for _ in range(1): client = Client(username, password) client.sync() yield gen.Task( asyncsleep, 1 ) #client.add_url(torrent) client.stop() tasks = [] for hash, torrent in client.torrents.items(): if torrent.get('progress') == 1000: tasks.append( gen.Task( torrent.fetch_files ) ) tasks.append( gen.Task( torrent.fetch_metadata ) ) responses = yield gen.Multi( tasks ) logging.info('responses %s' % [r.code for r in responses]) tasks = [] for hash, torrent in client.torrents.items(): if torrent.get('progress') == 1000: for file in torrent.files: link = file.webseed_link() print link request = tornado.httpclient.HTTPRequest(link, validate_cer
t=False)
                tasks.append( gen.Task( httpclient.fetch, request ) )

    while tasks:
        # pop at most 5 tasks so the last batch doesn't raise IndexError
        some_tasks = [tasks.pop() for _ in range(min(5, len(tasks)))]
        logging.info('executing tasks of len %s' % len(some_tasks))
        responses = yield gen.Multi( some_tasks )
        logging.info('responses %s' % [(r.code, len(r.body)) for r in responses])

    if Fal
se: tasks = [] for hash, torrent in client.torrents.items(): if torrent.get('progress') == 1000: link = torrent.webseed_link() print torrent.get('name'), torrent.get('progress'), link request = tornado.httpclient.HTTPRequest(link, validate_cert=False) tasks.append( gen.Task( httpclient.fetch, request ) ) responses = yield gen.Multi( tasks ) logging.info('responses %s' % [r.code for r in responses]) if __name__ == '__main__': ioloop = tornado.ioloop.IOLoop.instance() test_login() ioloop.start()
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2010 Benny Malengier # Copyright (C) 2011 Tim G L Lyons # # This program is free software; you can redistribute it and/or modify # it under
the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even
the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ...const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from . import MatchesFilterBase #------------------------------------------------------------------------- # # MatchesFilter # #------------------------------------------------------------------------- class MatchesSourceFilterBase(MatchesFilterBase): """ Rule that checks against another filter. """ labels = [_('Source filter name:')] name = 'Objects with source matching the <source filter>' description = "Matches objects with sources that match the " \ "specified source filter name" category = _('Citation/source filters') # we want to have this filter show source filters namespace = 'Source' def prepare(self, db, user): MatchesFilterBase.prepare(self, db, user) self.MSF_filt = self.find_filter() def apply(self, db, object): if self.MSF_filt is None : return False for citation_handle in object.get_citation_list(): citation = db.get_citation_from_handle(citation_handle) sourcehandle = citation.get_reference_handle() if self.MSF_filt.check(db, sourcehandle): return True return False
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Test runner objects that are only for end-to-end tests.

This package defines runners, which are used to execute test pipelines and
verify results.
"""

# Protect against environments where dataflow runner is not available.
# pylint: disable=wrong-import-order, wrong-import-position
from __future__ import absolute_import

try:
  from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
  from apache_beam.runners.direct.test_direct_runner import TestDirectRunner
except
ImportError: pass # pylint: enable=wrong-import-order, wrong-import-position
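# Hedged usage note: these test runners are normally selected by name through
# the pipeline options rather than imported directly, along the lines of the
# following (input/output values are placeholders):
#
#     python -m apache_beam.examples.wordcount \
#         --runner=TestDirectRunner \
#         --input=/tmp/words.txt --output=/tmp/counts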
# Copyright (c) 2020, Manfred Moitzi # License: MIT License import sys import time from datetime import datetime from pathlib import Path from ezdxf.acc import USE_C_EXT from ezdxf.render.forms import ellipse if USE_C_EXT is False: print("C-extension disabled or not available.") sys.exit(1) from ezdxf.math._construct import ( has_clockwise_orientation as py_has_clockwise_orientation, ) from ezdxf.acc.construct import ( has_clockwise_orientation as cy_has_clockwise_orientation, ) from ezdxf.math._construct import ( intersection_line_line_2d as py_intersection_line_line_2d, ) from ezdxf.acc.construct import ( intersection_line_line_2d as cy_intersection_line_line_2d, ) from ezdxf.version import __version__ from ezdxf.acc.vector import Vec2 def open_log(name: str): parent = Path(__file__).parent p = parent / "logs" / Path(name + ".csv") if not p.exists(): with open(p, mode="wt") as fp: fp.write( '"timestamp"; "pytime"; "cytime"; ' '"python_version"; "ezdxf_version"\n' ) log_file = open(p, mode="at") return log_file def log(name: str, pytime: float, cytime: float): log_file = open_log(name) timestamp = datetime.now().isoformat() log_file.write( f'{timestamp}; {pytime}; {cytime}; "{sys.version}"; "{__version__}"\n' ) log_file.close() def profile1(func, *args) -> float: t0 = time.perf_counter() func(*args) t1 = time.perf_counter() return t1 - t0 def profile(text, log_name, pyfunc, cyfunc, *args): pytime = profile1(pyfunc, *args) cytime = profile1(cyfunc, *args) ratio = pytime / cytime print(f"Python - {text} {pytime:.3f}s") print(f"Cython - {text} {cytime:.3f}s") print(f"Ratio {ratio:.1f}x") log(log_name, pytime, cytime) def profile_py_has_clockwise_orientation(vertices, count): for _ in range(count): py_has_clockwise_orientation(vertices) def profile_cy_has_clockwise_orientation(vertices, count): for _ in range(count): cy_has_clockwise_orientation(vertices) def profile_py_intersection_line_line_2d(count): line1 = [Vec2(0, 0), Vec2(2, 0)] line2 = [Vec2(1, -1), Vec2(1, 1)] for _ in range(count): py_intersection_line_line_2d(line1, line2) def profile_cy_intersection_line_line_2d(count): line1 = [Vec2(0, 0), Vec2(2, 0)] line2 = [Vec2(1, -1), Vec2(1, 1)] for _ in range(count): cy_intersection_line_line_2d(line1, line2) def profile_py_no_intersection_line_line_2d(count): line1 = [Vec2(0, 0), Vec2(2, 0)] line2 = [Vec2(0, 1), Vec2(2, 1)] for _ in range(count): py_intersection_line_line_2d(line1, line2) def profile_cy_no_intersection_line_line_2d(count): line1 = [Vec2(0, 0), Vec2(2, 0)] line2 = [Vec2(0, 1), Vec2(2, 1)] for _ in range(count): cy_intersection_line_line_2d(line1, line2) RUNS = 100_000 ellipse_vertices = list(ellipse(count=100, rx=10, ry=5)) print(f"Profiling 2D construction tools as Python an
d Cython implementations:") profile( f"detect {RUNS}x clockwise orientation of {len(ellipse_vertices)} vertices:", "c2d_has_clockwise_orientation", profile_py_has_clockwise_orientation, profile_cy_has_clockwise_orientation, ellipse_vertices, RUNS, ) profile( f"detect {RUNS}x real 2D line intersections:", "c2d_intersection_line_line_2d", profile_py_intersection_line_line_2d, profile_cy_intersection_line_line_2d, RUNS, ) profile( f"detect {R
UNS}x no 2D line intersections:", "c2d_no_intersection_line_line_2d", profile_py_no_intersection_line_line_2d, profile_cy_no_intersection_line_line_2d, RUNS, )
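# Typical console output per profile() call (all numbers illustrative only;
# the layout follows the print statements above):
#
#     Python - detect 100000x real 2D line intersections: 2.500s
#     Cython - detect 100000x real 2D line intersections: 0.250s
#     Ratio 10.0x
#
# The same pytime/cytime pair is appended to logs/<log_name>.csv by log().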
from pylab import * from scipy.io import loadmat, savemat import time import dicom Rel = 4.5 # assumed Relaxivity of Gd-DTPA at 3 T [s^-1 [mmol Gd-DTPA]^{-1}] flip_angle = 30 * pi / 180.0 # rad TR = 5e-3 # sec nx = 80 ny = 50 nt = 1321 noise_sigma = 0.2 random_seed = 1337 seed(random_seed) dx = 1 deltat = 1 data_dir = 'DICOM/' file_ext = 'QIBA_v06_Tofts_beta1' outfile_base = 'qiba6' data_dicom = zeros((nt, nx, ny)) t = 0.5*arange(nt) # ms print 'reading DICOMs from', data_dir for k in range(nt): file_name = '%s/%s_%04d.dcm' % (data_dir, file_ext, k+1) dcm = dicom.read_file(file_name) data_dicom[k,:,:] = dcm.pixel_array.astype('float') data_dce = data_dicom[:,10:70,:] nt, nx, ny = data_dce.shape T1map = ones((nx, ny)) # s R1map = 1 / T1map S0map = ones((nx, ny)) * 50000.0 # data_aif = mean(mean(data_dicom[:,70:,:], axis=2), axis=1) noise_sigma *= data_dce[0,0,0] # subsample data to
speed up the run data_dce = data_dce[::deltat,:,:] data_aif = data_aif[::deltat] # TODO: do this better t = t[::deltat] nt = len(t) # ## 2. Derive the AIF ## # turn Sb into Cp print 'co
nverting plasma ROI to AIF'

def dce_to_r1eff(S, S0, R1, TR, flip):
    S = S.T
    S0 = S0.T
    A = S.copy() / S0  # normalize by pre-contrast signal
    E0 = exp(-R1 * TR)
    E = (1.0 - A + A*E0 - E0*cos(flip)) /\
        (1.0 - A*cos(flip) + A*E0*cos(flip) - E0*cos(flip))
    R = (-1.0 / TR) * log(E)
    return R.T

def r1eff_to_conc(R1eff, R1map, relaxivity):
    return (R1eff - R1map) / relaxivity

T1p = 1.440
R1p = 1 / T1p
Hct = 0.45
S0 = data_aif[:4].mean()
R1_eff_aif = dce_to_r1eff(data_aif, S0, R1p, TR, flip_angle)
Cb = r1eff_to_conc(R1_eff_aif.flatten(), R1p, Rel)
Cp = Cb.flatten() / (1.0 - Hct)

##
## 3. Reduce the problem size by averaging 10x10 ROIs to single pixels.
##

nx /= dx
ny /= dx
data_dce = data_dce[:,::dx,::dx]

R1map_reduced = R1map[::10,::10].copy()
S0map_reduced = S0map[::10,::10].copy()
data_dce_reduced = data_dce[:,::10,::10].copy()
mask = zeros_like(R1map) == 0
mask_reduced = mask[::10,::10].copy()

print 'writing MAT files'
mat = {}
mat["relaxivity"] = 4.5
mat["TR"] = 5e-3
mat["DCEdata"] = data_dce_reduced
mat["DCEflip"] = 30.0
mat["R10"] = R1map_reduced
mat["S0"] = S0map_reduced
mat["t"] = t
mat["Cp"] = Cp
mat['mask'] = mask_reduced
mat['models'] = [2]
savemat(outfile_base + '.mat', mat)

data_dce = abs(data_dce + noise_sigma*(randn(nt, nx, ny) + 1j*randn(nt, nx, ny)) / sqrt(2.0))
#data_dce = data_dce + noise_sigma*randn(nt, nx, ny)

mat["R10"] = R1map
mat["S0"] = S0map
mat['DCEdata'] = data_dce
mat['mask'] = mask
savemat(outfile_base + 'noisy.mat', mat)
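# Background for dce_to_r1eff() above (a sketch of the algebra, not extra
# functionality): the spoiled gradient-echo signal is
#
#     S = k * sin(flip) * (1 - E) / (1 - E*cos(flip)),  E = exp(-R1*TR)
#
# Normalizing a post-contrast sample by the pre-contrast signal gives
# A = S/S0; substituting E0 = exp(-R1_pre*TR) and solving for E yields the
# rational expression in the code, and finally R1_eff = -log(E) / TR.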
# Copyright (C) 2015 – 2021 Noa-Emil Nissinen (4shadoww) from core.hakkuframework import *
from core import getpath import http.client import socket conf = { "name": "apache_users", # Module's name (should be same as file's name) "version": "1.1", # Module version "shortde
sc": "scan directory of apache users", # Short description "github": "4shadoww", # Author's github "author": "4shadoww", # Author "email": "[email protected]", "initdate": "2016-03-01", "lastmod": "2021-07-11", "apisupport": True } # List of the variables variables = OrderedDict(( ("target", ["google.com", "target address"]), )) # Simple changelog changelog = "Version 1.0:\nrelease" def run(): variables['target'][0] = variables['target'][0].replace("http://", "") variables['target'][0] = variables['target'][0].replace("https://", "") print_info("your target : " + variables['target'][0]) print_info("loading path list...") f = open(getpath.db()+'apache_users.txt', 'r') paths = [] for line in f: paths.append(line.replace('\n', '')) f.close() try: paths_found = [] for path in paths: path = path.replace("\n", "") conn = http.client.HTTPConnection(variables['target'][0]) conn.request("GET", path) res = conn.getresponse() if(res.status==200): print_success("[%s] ... [%s %s]" % (path, res.status, res.reason)) paths_found.append(path) else: print_warning("[%s] ... [%s %s]" % (path, res.status, res.reason)) return paths_found except(socket.gaierror): print_error("host is down") return ModuleError("host is down")
from __futur
e__ import unicode_literals from django.apps import AppConfig class MergeserverConfig(AppConfig): name = 'Mer
geServer'
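# Hedged usage note: Django picks this config up when the app is listed in
# settings.py (entry assumed from the `name` attribute above):
#
#     INSTALLED_APPS = [
#         ...
#         'MergeServer',
#     ]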
## # Copyright 2009-2013 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ General EasyBuild support for installing FastQC @author: Emilio Palumbo """ import os import stat from easybuild.tools.filetools import run_cmd from easybuild.easyblocks.ge
neric.packedbinary import PackedBinary class EB_FastQC(PackedBinary): """Easyblock implementing the build step for
FastQC; it just gives execute permission to the `fastqc`
    binary before installing.
    """

    def install_step(self):
        """Override install_step from PackedBinary."""
        os.chdir(self.builddir)
        os.chmod("FastQC/fastqc",
                 os.stat("FastQC/fastqc").st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        super(EB_FastQC, self).install_step()
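# Hedged easyconfig sketch: EasyBuild selects this easyblock automatically
# for software named FastQC; the version and source filename below are
# illustrative only.
#
#     name = 'FastQC'
#     version = '0.11.9'
#     sources = ['fastqc_v%(version)s.zip']
#     # install_step() above then marks FastQC/fastqc executable.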
# -*- coding: utf-8 -*- """ plotting.py Part of douglib. Used for general data plotting. Created on Tue June 06 08:44:12 2014 @author: dthor """ # -------------------------------------------------------------
-------------- ### Imports # --------------------------------------------------------------------------- # Standard Library # Third-Party import matplotlib.pyplot as pyplot # Package / Application from .core import rc_to_radius def radius_plot(rcd_list, die_xy, center_rc): """ Plots up data by radius """ # rc_to_radius x_data = [] y_data = [] for rcd in
rcd_list: x_data.append(rc_to_radius((rcd[0], rcd[1]), die_xy, center_rc)) y_data.append(rcd[2]) pyplot.figure() pyplot.plot(x_data, y_data, 'bo') pyplot.xlabel("Radius") pyplot.ylabel("Value") pyplot.show() def main(): """ Runs only when module is called directly. Runs a quick sanity check on some of the functions in this module. """ import random die_xy = (2.43, 3.3) center_rc = (24, 31.5) fake_rcd_list = [] for row in range(30): for col in range(54): value = (random.normalvariate(10, 5) + rc_to_radius((row, col), die_xy, center_rc)) fake_rcd_list.append([row, col, value]) radius_plot(fake_rcd_list, die_xy, center_rc) # random.gauss() if __name__ == "__main__": main()
#!/usr/bin/env python # ********************************************************************** # # Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved. # # This copy of Ice is licensed to you under the terms described in the # ICE_LICENSE file included in this distribution. # # ********************************************************************** import os, sys, threading, subprocess, getopt, signal path = [ ".", "..", "../..", "../../..", "../../../.." ] head = os.path.dirname(sys.argv[0]) if len(head) > 0: path = [os.path.join(head, p) for p in path] path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ] if len(path) == 0: raise RuntimeError("can't find toplevel directory!") sys.path.append(os.path.join(path[0], "scripts")) import TestUtil def removeTrustSettings(): serverCert = os.path.join(path[0], "certs", "server.pem") if os.system("security verify-cert -c " + serverCert + " >& /dev/null") == 0: sys.stdout.write("removing trust settings for the HTTP server certificate... ") sys.stdout.flush() if os.system("security remove-trusted-cert " + serverCert) != 0: print("\nerror: couldn't remove trust settings for the HTTP server certificate") else: print("ok") else: print("trust settings already removed") # # On OS X, provide an option to allow removing the trust settings # if TestUtil.isDarwin(): try: opts, args = getopt.getopt(sys.argv[1:], "", ["clean"]) if ("--clean", "") in opts: removeTrustSettings() sys.exit(0) except getopt.GetoptError: pass version = "3.6.0" jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version}) javaHome = os.environ.get("JAVA_HOME", "") javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java" command = [javaCmd, "-jar", jar] if le
n(sys.argv) > 1: command += sys.argv[1:] p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 0) def signal_handler(signal, frame): if p: p.terminate() sys.exit(0) signal.signal(signal.SIGINT, signal_handler) signal.signal(si
gnal.SIGTERM, signal_handler)

if TestUtil.isDarwin():
    #
    # On OS X, we set the trust settings on the certificate to prevent
    # the Web browsers from prompting the user about the untrusted
    # certificate. Some browsers such as Chrome don't provide the
    # option to set this trust setting.
    #
    serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
    if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
        sys.stdout.write("adding trust settings for the HTTP server certificate... ")
        sys.stdout.flush()
        if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
            print("error: couldn't add trust settings for the HTTP server certificate")
        else:
            print("ok")
        print("run " + sys.argv[0] + " --clean to remove the trust setting")

while True:
    c = p.stdout.read(1)
    if not c:
        break
    if c == '\r':
        continue

    # Depending on Python version and platform, the value c could be a
    # string or a bytes object.
    if not isinstance(c, str):
        c = c.decode()

    sys.stdout.write(c)
    sys.stdout.flush()
None): self._dict = {} self._contained_type = contained_type self._container = container if container is not None else self def __eq__(self, other): try: return ( # quick check for comparing, e.g., XigtCorpus and Igt self._contained_type == other._contained_type and len(self) == len(other) and all(a == b for a, b in zip(self, other)) )
except AttributeError: return False def __getitem__(self, obj_id): if isinstance(obj_id, (int, slice)): return list.__getitem__(self, obj_id) elif obj_id in self._dict: return self._dict[obj_id] else: try: return list.__getitem__(self, int(obj_id)) except ValueError: pass raise KeyError(obj_id) def __setitem__(self, idx, obj): # only allow list indices, not d
ict keys (IDs) # NOTE: this method is destructive. check for broken refs here? self._assert_type(obj) try: cur_obj = list.__getitem__(self, idx) except TypeError: idx = int(idx) cur_obj = list.__getitem__(self, idx) if cur_obj.id is not None: del self._dict[cur_obj.id] self._create_id_mapping(obj) list.__setitem__(self, idx, obj) def __delitem__(self, obj_id): # NOTE: this method is destructive. check for broken refs here? obj = self[obj_id] self.remove(obj) def get(self, obj_id, default=None): try: return self[obj_id] except (KeyError, IndexError): pass return default def select(self, **kwargs): # handle namespace separately so we can lookup the nsmap if 'namespace' in kwargs and kwargs['namespace'] in self.nsmap: kwargs['namespace'] = self.nsmap[kwargs['namespace']] def match(x): return all(getattr(x, k, None) == v for k, v in kwargs.items()) return filter(match, self) def _assert_type(self, obj): if self._contained_type and not isinstance(obj, self._contained_type): raise XigtStructureError( 'Only {} objects are allowed in this container.' .format(self._contained_type.__name__) ) def append(self, obj): self._assert_type(obj) obj._parent = self._container self._create_id_mapping(obj) list.append(self, obj) def insert(self, i, obj): self._assert_type(obj) obj._parent = self._container self._create_id_mapping(obj) list.insert(self, i, obj) def extend(self, objs): for obj in objs: self.append(obj) def remove(self, obj): # NOTE: this method is destructive. check for broken refs here? if obj.id is not None: del self._dict[obj.id] list.remove(self, obj) def clear(self): self._dict.clear() # list.clear doesn't exist in Python2 # list.clear(self) listclear(self) def _create_id_mapping(self, obj): if obj.id is not None: if obj.id in self._dict: raise XigtError( 'Id "{}" already exists in collection.'.format(obj.id), ) self._dict[obj.id] = obj def refresh_index(self): self._dict = {} for obj in self: self._create_id_mapping(obj) # deprecated methods def add(self, obj): warnings.warn( 'add(x) is deprecated; use append(x) instead.', DeprecationWarning ) return self.append(obj) def add_list(self, objs): warnings.warn( 'add_list(xs) is deprecated; use extend(xs) instead.', DeprecationWarning ) return self.extend(objs) class XigtAttributeMixin(object): def __init__(self, id=None, type=None, attributes=None, namespace=None, nsmap=None): self.id = id self.type = type self.attributes = dict(attributes or []) self.namespace = namespace self.nsmap = nsmap # if id is not None or ID not in self.attributes: # self.attributes[ID] = id # if type is not None or TYPE not in self.attributes: # self.attributes[TYPE] = type def __eq__(self, other): try: return ( self.id == other.id and self.type == other.type and self.attributes == other.attributes and self.namespace == other.namespace # and self.nsmap == other.nsmap ) except AttributeError: return False def get_attribute(self, key, default=None, inherit=False, namespace=None): if key is None: raise ValueError( 'Attribute key must be of type str, not ' + key.__class__.__name__ ) if not key.startswith('{') and ':' in key: prefix, suffix = key.split(':', 1) key = '{%s}%s' % (self.nsmap[prefix], suffix) elif namespace in self.nsmap: key = '{%s}%s' % (self.nsmap[namespace], key) elif namespace: key = '{%s}%s' % (namespace, key) try: return self.attributes[key] except KeyError: if inherit and _has_parent(self): return self._parent.get_attribute( key, default, inherit, namespace=namespace ) else: return default @property def id(self): return self._id @id.setter def 
id(self, value): if value is not None and not id_re.match(value): raise ValueError('Invalid ID: {}'.format(value)) self._id = value @property def nsmap(self): if self._nsmap is None: if _has_parent(self): return self._parent.nsmap else: return {} else: return self._nsmap @nsmap.setter def nsmap(self, value): if value is not None: value = dict(value or []) self._nsmap = value # no validation for type yet, so the property isn't necessary # @property # def type(self): # return self._type # @type.setter # def type(self, value): # self._type = value class XigtReferenceAttributeMixin(object): def __init__(self, alignment=None, content=None, segmentation=None): if segmentation and (content or alignment): raise XigtError( 'The "segmentation" reference attribute cannot co-occur with ' 'the "content" or "alignment" reference attributes.' ) if alignment is not None: self.attributes[ALIGNMENT] = alignment if content is not None: self.attributes[CONTENT] = content if segmentation is not None: self.attributes[SEGMENTATION] = segmentation def referents(self, refattrs=None): if not getattr(self, 'igt'): raise XigtError('Cannot retrieve referents; unspecified IGT.') if not getattr(self, 'id'): raise XigtError('Cannot retrieve referents; unspecified id.') return self.igt.referents(self.id, refattrs=refattrs) def referrers(self, refattrs=None): if not getattr(self, 'igt'): raise XigtError('Cannot retrieve referrers; unspecified IGT.') if not getattr(self, 'id'): raise XigtError('Cannot retrieve referrers; unspecified id.') return self.igt.referrers(self.id, refattrs=refattrs) @property def alignment(self): return self.attributes.get(ALIGNMENT) @alignment.setter def alignment(self, value): self.attributes[ALIGNMENT] = value @property def content(self): return self.attributes.get(CONTENT) @content.setter def content(self, value): self.attributes[CONTENT] = value @property def segmentatio
""" def dist(m, i, j, what = lambda m: m.decisions): "Euclidean distance 0 <= d <= 1 between decisions" n = len(i.cells) deltas = 0 for c in what(m): n1 = norm(m, c, i.cells[c]) n2 = norm(m, c, j.cells[c]) inc = (n1 - n2) ** 2 deltas += inc n += abs(m.w[c]) return deltas ** 0.5 / n ** 0.5 """ The _Dist_ function normalizes all the raw values zero to one. """ def norm(m, c, val) : "Normalizes val in col c within model m 0..1" return (atom(val) - atom(m.lo[c])) / (atom(m.hi[c]) - atom(m.lo[c]) + 0.0001) """ Now we can define _furthest_: """ def furthest(m, i, all, init = 0, better = gt): "find which of all is furthest from 'i'" out, d = i, init for j in all: if i == j: continue tmp = dist(m, i, j) if better(tmp, d): out, d = j, tmp return out """ And of course, _closest_: """ def closest(m, i, all): return furthest(m, i, all, init = 10 ** 32, better = lt) """ ## WHERE2 = Recursive Fastmap WHERE2 finds everyone's else's distance from the poles and divide the data on the mean point of those distances. This all stops if: + Any division has _tooFew_ solutions (say, less than _sqrt_ of the total number of solutions). + Something has gone horribly wrong and you are recursing _tooDeep_ This code is controlled by the options in [_The_ settings](settingspy). For example, if _The.pruning_ is true, we may ignore some sub-tree (this process is discussed, later on). Also, if _The.verbose_ is true, the _show_ function prints out a little tree showing the progress (and to print indents in that tree, we use the string _The.b4_). For example, here's WHERE2 dividing 93 examples from NASA93. ---| _where |----------------- 93 |.. 46 |.. |.. 23 |.. |.. |.. 11 |.. |.. |.. |.. 5. |.. |.. |.. |.. 6. |.. |.. |.. 12 |.. |.. |.. |.. 6. |.. |.. |.. |.. 6. |.. |.. 23 |.. |.. |.. 11 |.. |.. |.. |.. 5. |.. |.. |.. |.. 6. |.. |.. |.. 12 |.. |.. |.. |.. 6. |.. |.. |.. |.. 6. |.. 47 |.. |.. 23 |.. |.. |.. 11 |.. |.. |.. |.. 5. |.. |.. |.. |.. 6. |.. |.. |.. 12 |.. |.. |.. |.. 6. |.. |.. |.. |.. 6. |.. |.. 24 |.. |.. |.. 12 |.. |.. |.. |.. 6. |.. |.. |.. |.. 6. |.. |.. |.. 12 |.. |.. |.. |.. 6. |.. |.. |.. |.. 6. WHERE2 returns clusters, where each cluster contains multiple solutions. """ def where2(m, data, lvl = 0, up = None, verbose = False): node = o(val = None, _up = up, _kids = []) def tooDeep(): return lvl > The.what.depthMax def tooFew() : return len(data) < The.what.minSize def show(suffix): if verbose: print(The.what.b4 * lvl, len(data), suffix, ' ; ', id(node) % 1000, sep = '') if tooDeep() or tooFew(): show(".") node.val = data else: show("") wests, west, easts, east, c = fastmap(m, data) node.update(c = c, east = east, west = west) goLeft, goRight = maybePrune(m, lvl, west, east) if goLeft: node._kids += [where2(m, wests, lvl + 1, node)] if goRight: node._kids += [where2(m, easts, lvl + 1, node)] return node """ ## An Experimental Extensions Lately I've been experimenting with a system that prunes as it divides the data. GALE checks for domination between the poles and ignores data in halves with a dominated pole. This means that for _N_ solutions we only ever have to evaluate _2*log(N)_ of them- which is useful if each evaluation takes a long time. The niches found in this way contain non-dominated poles; i.e. they are approximations to the Pareto frontier. Preliminary results show that this is a useful approach but you should treat those results with a grain of salt. In any case, this code supports that pruning as an optional extra (and is enabled using the _slots.pruning_ flag). 
In summary, this code says if the scores for the poles are more different that _slots.wriggle_ and one pole has a better score than the other, then ignore the other pole. """ def maybePrune(m, lvl, west, east): "Usually, go left then right, unless dominated." goLeft, goRight = True, True # default if The.prune and lvl >= The.what.depthMin: sw = scores(m, west) se = scores(m, east) if abs(sw - se) > The.wriggle: # big enough to consider if se > sw: goLeft = False # no left if sw > se: goRight = False # no right return goLeft, goRight """ Note that I do not allow pruning until we have descended at least _slots.depthMin_ into the tree. ### Model-specific Stuff WHERE2 talks to models via the the following model-specific variables: + _m.cols_: list of indices in a list + _m.names_: a list of names for each column. + _m.decisions_: the subset of cols relating to decisions. + _m.obectives_: the subset of cols relating to objectives. + _m.eval(m,eg)_: function for computing variables from _eg_. + _m.lo[c]_ : the lowest value in column _c_. + _m.hi[c]_ : the highest value in column _c_. + _m.w[c]_: the weight for each column. Usually equal to one. If an objective and if we are minimizing that objective, then the weight is negative. ### Model-general stuff Using the model-specific stuff, WHERE2 defines some useful general functions. """ def some(m, x) : "with variable x of model m, pick one value at random" return m.lo[x] + by(m.hi[x] - m.lo[x]) def scores(m, it): "Score an individual." if not it.scored: m.eval(m, it) new, w = 0, 0 for c in m.objectives: val = it.cells[c] w += abs(m.w[c]) tmp = norm(m, c, val) if m.w[c] < 0: tmp = 1 - tmp new += (tmp ** 2) it.score = (new ** 0.5) / (w ** 0.5 + 1e-4) it.scored = True return it.score """ ## Tree Code Tools for manipulating the tree returned by _where2_. ### Primitive: Walk the nodes """ def nodes(tree, seen = None, steps = 0): if seen is None: seen = [] if tree: if not id(tree) in seen: seen.append(id(tree)) yield tree, steps for kid in tree._kids: for sub, steps1 in nodes(kid, seen, steps + 1): yield sub, steps1 """ ### Return nodes that are leaves """ def leaves(tree, seen = None, steps = 0): for node, steps1 in nodes(tree, seen, steps): if not node._kids: yield node, steps1 """ ### Return nodes nearest to furthest """ # walk sideways.. def neighbors(leaf, seen = None, steps = -1): """Walk the tree from 'leaf' increasingly distant leaves. """ if seen is None: seen = [] for down, steps1 in leaves(leaf, seen, steps + 1): yield down, steps1 if leaf: for up, steps1 in neighbors(leaf._up, seen, steps + 1): yield up, steps1 """ ### Return nodes in Groups, Closest to Furthest """ def around(leaf, f = lambda x: x): tmp, last = [], None for node, dist in neighbors(leaf): if dist > 0: if dist == last: tmp += [f(node)] else: if tmp: yield last, tmp tmp = [f(node)] last = dist if tmp: yield last, tmp """ ## Demo Code ### Code Showing the scores """ # @go def _scores(): m = nasa93() out = [] for row in m._rows: scores(m, row) out += [(row.score, [row.cells[c] for c in m.objectives])] for s, x in sorted(out): print(s, x) """ ### Code Showing the Distances """ # @go def _distances(m = nasa93): m = m() seed(The.seed) for i in m._rows: j = closest(m, i, m._rows) k = furthest(m, i, m._rows) idec = [i.cells[c] for c in m.decisions] jdec = [j.cells[c] for c in m.decisions] kdec = [k.cells[c] for c in m.decisions] print("\n", gs(idec), g(scores(m, i)), "\n", gs(jdec), "closest ", g(dist(m, i, j)), "\n", gs(kdec), "furthest", g(dist(m, i, k))) """ ### A Demo for Where2. 
""" def prepare(m, settings = None): "Prepare the 'The' class" seed(1) global The The = settings if settings else defaults().update(verbose = True, minSize = len(m._rows) ** 0.5,
prune = False, wriggle = 0.3) return The def _where(m = nasa93): m = m() see
d(1) told = N() for r in m._rows: s = scores(m, r) told += s g
# Exercise 16: Reading and Writing Files from sys import argv script, filename = argv print "We're going to erase %r." % filename print "If you don't want that, hit CTRL-C (^C)." print "If you do want that, hit RETURN." raw_input("?") print "Opening
the file..." target = open(filename, 'w') print "Truncating the file. Goodbye!" target.truncate() print "Now I'm going to ask you for three lines." line1 = raw_input("line 1: ") line2 = raw_input("line 2: ") line3 = raw_input("
line 3: ") print "I'm going to write these to the file." target.write(line1) target.write("\n") target.write(line2) target.write("\n") target.write(line3) target.write("\n") print "And finally, we close it." target.close() # $ python ex16.py test.txt
import unittest


class StringProcessingTestBase(unittest.TestCase):
    # The backslash character. Needed since there are limitations when
    # using backslashes at the end of raw-strings in front of the
    # terminating " or '.
    bs = "\\"

    # Basic test strings all StringProcessing functions should test.
    test_strings = [
        r"out1 'escaped-escape: \\ ' out2",
        r"out1 'escaped-quote: \' ' out2",
        r"out1 'escaped-anything: \X ' out2",
        r"out1 'two escaped escapes: \\\\ ' out2",
        r"out1 'escaped-quote at end: \'' out2",
        r"out1 'escaped-escape at end: \\' out2",
        r"out1 'str1' out2 'str2' out2",
        r"out1 \' 'str1' out2 'str2' out2",
        r"out1 \\\' 'str1' out2 'str2' out2",
        r"out1 \\ 'str1' out2 'str2' out2",
        r"out1 \\\\ 'str1' out2 'str2' out2",
        r"out1 \\'str1' out2 'str2' out2",
        r"out1 \\\\'str1' out2 'str2' out2",
        r"out1 'str1''str2''str3' out2",
        r"",
        r"out1 out2 out3",
        bs,
        2 * bs]

    # Test string for multi-pattern tests (since we want to vary the
    # pattern, not the test string).
    multi_pattern_test_string = (r"abcabccba###\\13q4ujsabbc\+'**'ac"
                                 r"###.#.####-ba")
    # Multiple patterns for the multi-pattern tests.
    multi_patterns = [r"abc",
                      r"ab",
                      r"ab|ac",
                      2 * bs,
                      r"#+",
                      r"(a)|(b)|(#.)",
                      r"(?:a(b)*c)+",
                      r"1|\+"]

    # Test strings for the remove_empty_matches feature (alias auto-trim).
    auto_trim_test_pattern = r";"
    auto_trim_test_strings = [r";;;;;;;;;;;;;;;;",
                              r"\\;\\\\\;\\#;\\\';;\;\\\\;+ios;;",
                              r"1;2;3;4;5;6;",
                              r"1;2;3;4;5;6;7",
                              r"",
                              r"Hello world",
                              r"\;",
                              r"\\;",
                              r"abc;a;;;;;asc"]

    # Test strings for search-in-between functions.
    search_in_between_begin_pattern = r"("
    search_in_between_end_pattern = r")"
    search_in_between_test_strings = [
        r"()assk(This is a word)and((in a word) another ) one anyway.",
        r"bcc5(((((((((((((((((((1)2)3)))))))))))))))))",
        r"Let's (do (it ) more ) complicated ) ) ) () (hello.)",
        r"()assk\\(This\ is a word\)and((in a\\\ word\\\\\) another \)) "
        r"one anyway.",
        r"bcc5\(\(\((((((\\\(((((((((((1)2)3))\\\\\)))))))))))))\)\)",
        r"Let's \(do (it ) more ) \\ complicated ) ) ) () (hello.)\\z"]

    @staticmethod
    def _construct_message(func, args, kwargs):
        """
        Constructs the error message for the call result assertions.

        :param func:   The function that was called.
        :param args:   The argument tuple the function was invoked with.
        :param kwargs: The named arguments dict the function was invoked
                       with.
        :return:       The error message.
        """
        args = [repr(x) for x in args]
        kwargs = [str(key) + '=' + repr(value)
                  for key, value in kwargs.items()]

        return "Called {}({}).".format(func.__name__, ", ".join(args + kwargs))

    def assertResultsEqual(self,
                           func,
                           invocation_and_results,
                           postprocess=lambda result: result):
        """
        Tests each given invocation against the given results with the
        specified function.

        :param func:                   The function to test.
        :param invocation_and_results: A dict containing the invocation tuple
                                       as key and the result as value.
        :param postprocess:            A function that shall process the
                                       returned result from the tested
                                       function. The function must accept only
                                       one parameter as postprocessing input.
                                       Performs no postprocessing by default.
        """
        for args, result in invocation_and_results.items():
            self.assertEqual(
                postprocess(func(*args)),
                result,
                self._construct_message(func, args, {
})) def assertResultsEqualEx(self, func, invocation_and_results, postprocess=lambda result: result):
""" Tests each given invocation against the given results with the specified function. This is an extended version of ``assertResultsEqual()`` that supports also ``**kwargs``. :param func: The function to test. :param invocation_and_results: A dict containing the invocation tuple as key and the result as value. The tuple contains (args, kwargs). :param postprocess: A function that shall process the returned result from the tested function. The function must accept only one parameter as postprocessing input. Performs no postprocessing by default. """ for (args, kwargs), result in invocation_and_results.items(): self.assertEqual( postprocess(func(*args, **kwargs)), result, self._construct_message(func, args, kwargs))
# -*- coding: utf-8 -*-
import os
import KBEngine
from KBEDebug import *

def onBaseAppReady(isBootstrap):
    """
    KBEngine method.
    The baseapp is ready.
    @param isBootstrap: whether this is the first baseapp to start
    @type isBootstrap: BOOL
    """
    INFO_MSG('onBaseAppReady: isBootstrap=%s, appID=%s, bootstrapGroupIndex=%s, bootstrapGlobalIndex=%s' % \
        (isBootstrap, os.getenv("KBE_COMPONENTID"), os.getenv("KBE_BOOTIDX_GROUP"), os.getenv("KBE_BOOTIDX_GLOBAL")))

def onReadyForLogin(isBootstrap):
    """
    KBEngine method.
    If the return value is greater than or equal to 1.0, initialization is
    fully complete; otherwise return the readiness progress as a value
    between 0.0 and 1.0.
    This makes it possible to open logins only after the script layer has
    finished all of its initialization.
    @param isBootstrap: whether this is the first baseapp to start
    @type isBootstrap: BOOL
    """
    return 1.0

def onReadyForShutDown():
    """
    KBEngine method.
    The process asks the script layer: I am about to shut down; is the
    script ready for it?
    If True is returned, the process enters the shutdown sequence; any other
    value makes the process ask again after a while.
    Users can clean up script-layer data when this message is received, so
    that the script layer's work is not lost because of the shutdown.
    """
    INFO_MSG('onReadyForShutDown()')
    return True

def onBaseAppShutDown(state):
    """
    KBEngine method.
    Callback invoked before this baseapp is shut down.
    @param state:  0 : before all clients are disconnected
                   1 : before all entities are written to the database
                   2 : after all entities have been written to the database
    @type state: int
    """
    INFO_MSG('onBaseAppShutDown: state=%i' % state)

def onInit(isReload):
    """
    KBEngine method.
    Called once the engine has started and all scripts have been initialized.
    @param isReload: whether this was triggered by reloading the scripts
    @type isReload: bool
    """
    INFO_MSG('onInit::isReload:%s' % isReload)

def onFini():
    """
    KBEngine method.
    The engine is shutting down.
    """
    INFO_MSG('onFini()')

def onCellAppDeath(addr):
    """
    KBEngine method.
    A cellapp has died.
    """
    WARNING_MSG('onCellAppDeath: %s' % (str(addr)))

def onGlobalData(key, value):
    """
    KBEngine method.
    globalData has changed.
    """
    DEBUG_MSG('onGlobalData: %s' % key)

def onGlobalDataDel(key):
    """
    KBEngine method.
    A globalData entry was deleted.
    """
    DEBUG_MSG('onDelGlobalData: %s' % key)

def onGlobalBases(key, value):
    """
    KBEngine method.
    globalBases has changed.
    """
    DEBUG_MSG('onGlobalBases: %s' % key)

def onGlobalBasesDel(key):
    """
    KBEngine method.
    A globalBases entry was deleted.
    """
    DEBUG_MSG('onGlobalBasesDel: %s' % key)

def onLoseChargeCB(ordersID, dbid, success, datas):
    """
    KBEngine method.
    An unknown order was processed; the record may have been cleared by
    billing because of a timeout, after which a processing callback from the
    third-party payment system still arrived.
    """
    DEBUG_MSG('onLoseChargeCB: ordersID=%s, dbid=%i, success=%i, datas=%s' % \
        (ordersID, dbid, success, datas))
''' This module controls the dialog to set filter criteria ''' from PyQt5 import QtCore, Qt, QtWidgets from views.filter_dialog import Ui_FilterDialog class FilterGamesController(QtWidgets.QDialog): ''' Controller object for the filter games dialog. ''' def __init__(self, table, parent=None): QtWidgets.QDialog.__init__(self, parent) self.user_interface = Ui_FilterDialog() self.user_interface.setupUi(self) self.table = table self.canceled = False self.filtering_all = True self.initialize_ui() self.setup_signals() def initialize_ui(self): ''' Connects interface's sections with their corresponding models ''' def assign_model(model, list_widget): ''' Private function to populate a specific section in the dialog with the values stored in a model parameters: - model: the model assigned to the dialog section - list_widget: the list widget to be populated ''' model_qt = Qt.QStandardItemModel() values_list = model.get_list() for value in values_list: item = Qt.QStandardItem(value) item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled) item.setData(QtCore.Qt.Checked, QtCore.Qt.CheckStateRole) if model.get_filtered(value): item.setCheckState(QtCore.Qt.Unchecked) model_qt.appendRow(item) list_widget.setModel(model_qt) assign_model(self.table.models['system_list_model'], self.user_interface.listSystem) assign_model(self.table.models['status_list_model'], self.user_interface.listStatus) assign_model(self.table.models['label_list_model'], self.user_interface.listLabel) assign_model(self.table.models['difficulty_list_model'], self.user_interface.listDifficulty) def setup_signals(self): ''' Connects interface's widgets signals to the corresponding slots ''' def select_all(list_view): ''' Generic callback for a 'select all' button parameters: -list_view: the list affected when the user clicks 'select all' ''' model_qt = list_view.model() for index in range(model_qt.rowCount()): item = model_qt.item(index) if item.isCheckable() and item.checkState() == QtCore.Qt.Unchecked: item.setCheckState(QtCore.Qt.Checked) def deselect_all(list_view): ''' Generic callback for a 'deselect all' button parameters: - list_view: the list affected when the user clicks 'deselect all' ''' model_qt = list_view.model() for index in range(model_qt.rowCount()): item = model_qt.item(index) if item.isCheckable() and item.checkState() == QtCore.Qt.Checked: item.setCheckState(QtCore.Qt.Unchecked) self.user_interface.pushButtonSelectAllSystem.clicked.connect( lambda: select_all(self.user_interface.listSystem)) self.user_interface.pushButtonDeselectAllSystem.clicked.connect( lambda: deselect_all(self.user_interface.listSystem)) self.user_interface.pushButtonSelectAllStatus.clicked.connect( lambda: select_all(self.user_interface.listStatus)) self.user_interface.pushButtonDeselectAllStatus.clicked.connect( lambda: deselect_all(self.user_interface.listStatus)) self.user_interface.pushButtonSelectAllLabel.clicked.connect( lambda: select_all(self.user_interface.listLabel)) self.user_interface.pushButtonDeselectAllLabel.clicked.connect( lambda: deselect_all(self.user_interface.listLabel)) self.user_interface.pushButtonSelectAllDifficulty.clicked.connect( lambda: select_all(self.user_interface.listDifficulty)) self.user_interface.pushButtonDeselectAllDifficulty.clicked.connect( lambda: deselect_all(self.user_interface.listDifficulty)) self.user_interface.pushButtonOk.clicked.connect(self.ok_clicked) self.user_interface.pushButtonCancel.clicked.co
nnect(self.cancel_clicked) def
ok_clicked(self):
        '''
        Callback for when the user clicks the 'ok' button. The dialog is
        closed and the parent is informed, by means of an attribute, that
        the changes have to take effect
        '''
        self.canceled = False
        self.hide()

    def cancel_clicked(self):
        '''
        Callback for when the user clicks the 'cancel' button. The dialog is
        closed and the parent is informed, by means of an attribute, that
        the changes shouldn't take effect
        '''
        self.canceled = True
        self.hide()

    def closeEvent(self, event):
        '''
        Overrides closeEvent from the QDialog class. This tells the main
        window controller to behave as if the 'cancel' button was pressed.

        parameters:
            - event: the passed event (not used in this overridden version)
        '''
        # pylint: disable=invalid-name
        # pylint: disable=unused-argument
        self.canceled = True

    def apply_filtering(self):
        '''
        Updates the models with information about which values are to be
        filtered
        '''
        def apply_filtering_per_type(model, list_widget):
            '''
            Updates a specific model

            parameters:
                - model: the model to be updated
                - list_widget: the list widget associated with that model
            '''
            model_qt = list_widget.model()
            for index in range(model_qt.rowCount()):
                item = model_qt.item(index)
                model.set_filtered(str(item.text()), item.checkState() != QtCore.Qt.Checked)

        if not self.canceled:
            apply_filtering_per_type(
                self.table.models['system_list_model'], self.user_interface.listSystem)
            apply_filtering_per_type(
                self.table.models['status_list_model'], self.user_interface.listStatus)
            apply_filtering_per_type(
                self.table.models['label_list_model'], self.user_interface.listLabel)
            apply_filtering_per_type(
                self.table.models['difficulty_list_model'], self.user_interface.listDifficulty)
            self.table.hide_rows()
            models = [self.table.models['system_list_model'],
                      self.table.models['status_list_model'],
                      self.table.models['label_list_model'],
                      self.table.models['difficulty_list_model']]
            # filtering_all is True only when no value of any model is filtered out
            self.filtering_all = not any(model.is_any_filtered() for model in models)
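# Usage sketch (not part of the original module): one plausible way to drive
# the dialog. `table` and `main_window` are hypothetical here; the controller
# only requires `table` to expose a `models` dict and a `hide_rows()` method,
# as used above. Hiding a modal dialog ends its exec_() loop, so both the
# 'ok' and 'cancel' paths return control to the caller.
dialog = FilterGamesController(table, parent=main_window)
dialog.exec_()
dialog.apply_filtering()  # updates the models only when the dialog wasn't canceled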
from interface.design.ui_screen import Ui_wnd_gifextract
from PyQt5 import QtWidgets
import sys
import listener
import config
import ffmpeg
import queue  # project-local job queue module (shadows the stdlib queue)
import interface.menus.Frame_CreateGif
import interface.menus.Frame_ExtractFrames
import interface.menus.Frame_Queue


class Screen(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        def setupFFMpeg():
            self.ffmpeg = ffmpeg.FFmpeg(self.config)

        def setupConfig():
            self.config = config.Config(self)

        def setupQueue():
            self.queue = queue.JobQueue(self)

        def setupTabs():
            self.tab_video = interface.menus.Frame_ExtractFrames.Frame(self)
            self.ui.tabWidget.addTab(self.tab_video, "Frame Extraction")
            self.tab_gif = interface.menus.Frame_CreateGif.Frame(self)
            self.ui.tabWidget.addTab(self.tab_gif, "Gif Creation")
            self.tab_queue = interface.menus.Frame_Queue.Frame(self)
            self.ui.tabWidget.addTab(self.tab_queue, "Queue")

        # Initialize as QMainWindow (the base class), not QWidget
        QtWidgets.QMainWindow.__init__(self, parent)
        self.ui = Ui_wnd_gifextract()
        self.ui.setupUi(self)
        self.slots = listener.Slots(self)
        self.createLinks()
        setupConfig()
        setupTabs()
        setupFFMpeg()
        setupQueue()

    def createLinks(self):
        self.ui.actionPreferences.triggered.connect(self.openOptions)

    def openOptions(self):
        # Imported lazily so the dialog module is only loaded when needed
        import interface.menus.ConfigMenu
        options = interface.menus.ConfigMenu.ConfigMenu(self, self.config)
        options.show()


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    program = Screen()
    program.show()
    sys.exit(app.exec_())
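# Usage sketch (not part of the original module): the tabs created in
# setupTabs() are ordinary QTabWidget pages, so standard Qt calls apply,
# e.g. jumping straight to the Queue tab at startup:
app = QtWidgets.QApplication(sys.argv)
program = Screen()
program.ui.tabWidget.setCurrentWidget(program.tab_queue)  # plain QTabWidget API
program.show()
sys.exit(app.exec_())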
# # Copyright 2007 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """Convert iCalendar files to Gettext PO localization files. See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ical2po.html for examples and usage instructions. """ from translate.convert import convert from translate.storage import ical, po class ical2po: """Convert one or two iCalendar files to a single PO file.""" SourceStoreClass = ical.icalfile TargetStoreClass = po.pofile TargetUnitClass = po.pounit def __init__( self, input_file, output_file, template_file=None, blank_msgstr=False, duplicate_style="msgctxt", ): """Initialize the converter.""" self.blank_msgstr = blank_msgstr self.duplicate_style = duplicate_style
self.extraction_msg = None self.output_file = output_file self.source_store = self.SourceStoreClass(input_file) self.target_store = self.TargetStoreClass() self.template_store = None if template_file is not None: self.template_store = self.
SourceStoreClass(template_file) def convert_unit(self, unit): """Convert a source format unit to a target format unit.""" target_unit = self.TargetUnitClass(encoding="UTF-8") target_unit.addlocation("".join(unit.getlocations())) target_unit.addnote(unit.getnotes("developer"), "developer") target_unit.source = unit.source target_unit.target = "" return target_unit def convert_store(self): """Convert a single source format file to a target format file.""" self.extraction_msg = "extracted from %s" % self.source_store.filename for source_unit in self.source_store.units: self.target_store.addunit(self.convert_unit(source_unit)) def merge_stores(self): """Convert two source format files to a target format file.""" self.extraction_msg = "extracted from {}, {}".format( self.template_store.filename, self.source_store.filename, ) self.source_store.makeindex() for template_unit in self.template_store.units: target_unit = self.convert_unit(template_unit) template_unit_name = "".join(template_unit.getlocations()) add_translation = ( not self.blank_msgstr and template_unit_name in self.source_store.locationindex ) if add_translation: source_unit = self.source_store.locationindex[template_unit_name] target_unit.target = source_unit.source self.target_store.addunit(target_unit) def run(self): """Run the converter.""" if self.template_store is None: self.convert_store() else: self.merge_stores() if self.extraction_msg: self.target_store.header().addnote(self.extraction_msg, "developer") self.target_store.removeduplicates(self.duplicate_style) if self.target_store.isempty(): return 0 self.target_store.serialize(self.output_file) return 1 def run_converter( input_file, output_file, template_file=None, pot=False, duplicatestyle="msgctxt" ): """Wrapper around converter.""" return ical2po( input_file, output_file, template_file, blank_msgstr=pot, duplicate_style=duplicatestyle, ).run() formats = { "ics": ("po", run_converter), ("ics", "ics"): ("po", run_converter), } def main(argv=None): parser = convert.ConvertOptionParser( formats, usetemplates=True, usepots=True, description=__doc__ ) parser.add_duplicates_option() parser.passthrough.append("pot") parser.run(argv) if __name__ == "__main__": main()
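# Usage sketch (not part of the original module; file names are hypothetical):
# converting a single iCalendar file to PO with the run_converter wrapper
# defined above. The storage classes read and write binary streams, hence
# the "rb"/"wb" modes.
with open("calendar.ics", "rb") as input_file, open("calendar.po", "wb") as output_file:
    run_converter(input_file, output_file, pot=False, duplicatestyle="msgctxt")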