from django.apps import AppConfig
from django.conf import settings

from neomodel import config

config.AUTO_INSTALL_LABELS = False


class NeomodelConfig(AppConfig):
    name = 'django_neomodel'
    verbose_name = 'Django neomodel'

    def read_settings(self):
        config.DATABASE_URL = getattr(settings, 'NEOMODEL_NEO4J_BOLT_URL', config.DATABASE_URL)
        config.FORCE_TIMEZONE = getattr(settings, 'NEOMODEL_FORCE_TIMEZONE', False)
        config.ENCRYPTED_CONNECTION = getattr(settings, 'NEOMODEL_ENCRYPTED_CONNECTION', False)
        config.MAX_CONNECTION_POOL_SIZE = getattr(settings, 'NEOMODEL_MAX_CONNECTION_POOL_SIZE',
                                                  config.MAX_CONNECTION_POOL_SIZE)

    def ready(self):
        self.read_settings()
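# Usage sketch (assumed, not part of the original app): these are the Django
# settings that read_settings() consults. The names match the getattr() calls
# above; the values shown here are illustrative only.
#
#   NEOMODEL_NEO4J_BOLT_URL = 'bolt://neo4j:password@localhost:7687'
#   NEOMODEL_FORCE_TIMEZONE = False
#   NEOMODEL_ENCRYPTED_CONNECTION = False
#   NEOMODEL_MAX_CONNECTION_POOL_SIZE = 50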
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from bs4 import BeautifulSoup
from urllib.request import urlopen


def getSoupAQHI():
    html = urlopen("http://www.aqhi.gov.hk/en/aqhi/past-24-hours-aqhi45fd.html?stationid=80")
    soup = BeautifulSoup(html, "lxml")
    return soup


def getLatestAQHI(dataTable):
    aqhiTable = dataTable.findAll('tr')[1].findAll('td')
    aqhi = {}
    aqhi['dateTime'] = aqhiTable[0].text
    aqhi['index'] = aqhiTable[1].text
    return aqhi


def getRawAQICN():
    source = urlopen("http://aqicn.org/?city=HongKong/Central/Western&widgetscript&lang=en&size=xsmall&id=56d839cf2ad376.29520771")
    source = source.read().decode('utf-8')
    return source


def getLatestAQICN(source):
    aqi = source.split("Air Pollution.")[1]
    aqi = aqi.split("title")[1]
    aqi = aqi.split("</div>")[0]
    aqi = aqi.split(">")[1]
    aqits = source.split("Updated on ")[1].strip()
    aqits = aqits.split("<")[0]
    aqhiData = {}
    aqhiData['index'] = aqi
    aqhiData['dateTime'] = aqits
    return aqhiData


def getPollutionData():
    soupAQHI = getSoupAQHI()
    dataTableAQHI = soupAQHI.find('table', {'id': 'dd_stnh24_table'})
    aqhi = getLatestAQHI(dataTableAQHI)
    rawAQICN = getRawAQICN()
    aqicn = getLatestAQICN(rawAQICN)
    data = {}
    data['AQHI'] = aqhi['index']
    data['AQHITS'] = aqhi['dateTime']
    data['AQICN'] = aqicn['index']
    data['AQICNTS'] = aqicn['dateTime']
    return data


def testModule():
    data = getPollutionData()
    print(data['AQHI'] + " " + data['AQHITS'] + " " + data['AQICN'] + " " + data['AQICNTS'])
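# Usage sketch (assumed entry point, not part of the original script):
#   if __name__ == "__main__":
#       testModule()   # prints "<AQHI> <AQHI timestamp> <AQICN> <AQICN timestamp>"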
# Copyright (C) 2011-2015 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import

import re
import abc


class AddressbookError(Exception):
    pass


class AddressBook(object):
    """can look up email addresses and realnames for contacts.

    .. note::

        This is an abstract class that leaves :meth:`get_contacts`
        unspecified. See :class:`AbookAddressBook` and
        :class:`ExternalAddressbook` for implementations.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, ignorecase=True):
        self.reflags = re.IGNORECASE if ignorecase else 0

    @abc.abstractmethod
    def get_contacts(self):  # pragma no cover
        """list all contacts tuples in this abook as (name, email) tuples"""
        return []

    def lookup(self, query=''):
        """looks up all contacts where name or address match query"""
        res = []
        query = re.compile('.*%s.*' % query, self.reflags)
        for name, email in self.get_contacts():
            if query.match(name) or query.match(email):
                res.append((name, email))
        return res
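# Minimal sketch of a concrete subclass (assumed, not part of the original
# module): get_contacts() only has to return (name, email) tuples for the
# inherited lookup() to work.
class StaticAddressBook(AddressBook):
    def __init__(self, contacts, **kwargs):
        AddressBook.__init__(self, **kwargs)
        self._contacts = contacts

    def get_contacts(self):
        return self._contacts

# abook = StaticAddressBook([('Alice', 'alice@example.com')])
# abook.lookup('ali')  ->  [('Alice', 'alice@example.com')]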
e platform observations."""
        config = {
            "binary_sensor": {
                "platform": "bayesian",
                "name": "Test_Binary",
                "observations": [
                    {
                        "platform": "numeric_state",
                        "entity_id": "sensor.test_monitored",
                        "below": 10,
                        "above": 5,
                        "prob_given_true": 0.6,
                    },
                    {
                        "platform": "numeric_state",
                        "entity_id": "sensor.test_monitored1",
                        "below": 7,
                        "above": 5,
                        "prob_given_true": 0.9,
                        "prob_given_false": 0.1,
                    },
                ],
                "prior": 0.2,
            }
        }

        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()

        self.hass.states.set("sensor.test_monitored", 4)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"

        self.hass.states.set("sensor.test_monitored", 6)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", 4)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", 6)
        self.hass.states.set("sensor.test_monitored1", 6)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.6
        assert state.attributes.get("observations")[1]["prob_given_true"] == 0.9
        assert state.attributes.get("observations")[1]["prob_given_false"] == 0.1
        assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"

        self.hass.states.set("sensor.test_monitored", 6)
        self.hass.states.set("sensor.test_monitored1", 0)
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", 4)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"

        self.hass.states.set("sensor.test_monitored", 15)
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert state.state == "off"

    def test_sensor_state(self):
        """Test sensor on state platform observations."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "off",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    }
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }

        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()

        self.hass.states.set("sensor.test_monitored", "on")
        state = self.hass.states.get("binary_sensor.test_binary")

        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"

        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
        assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
        assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"

        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
        assert state.state == "off"

    def test_sensor_value_template(self):
        """Test sensor on template platform observations."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "template",
                        "value_template": "{{states('sensor.test_monitored') == 'off'}}",
                        "prob_given_true": 0.8,
                        "prob_given_false": 0.4,
                    }
                ],
                "prior": 0.2,
                "probability_threshold": 0.32,
            }
        }

        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()

        self.hass.states.set("sensor.test_monitored", "on")
        state = self.hass.states.get("binary_sensor.test_binary")

        assert [] == state.attributes.get("observations")
        assert 0.2 == state.attributes.get("probability")
        assert state.state == "off"

        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
        assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
        assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"

        self.hass.states.set("sensor.test_monitored", "off")
        self.hass.block_till_done()
        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
        assert state.state == "off"

    def test_threshold(self):
        """Test sensor on probability threshold limits."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitored",
                        "to_state": "on",
                        "prob_given_true": 1.0,
                    }
                ],
                "prior": 0.5,
                "probability_threshold": 1.0,
            }
        }

        assert setup_component(self.hass, "binary_sensor", config)
        self.hass.block_till_done()

        self.hass.states.set("sensor.test_monitored", "on")
        self.hass.block_till_done()
        state = self.hass.states.get("binary_sensor.test_binary")

        assert round(abs(1.0 - state.attributes.get("probability")), 7) == 0
        assert state.state == "on"

    def test_multiple_observations(self):
        """Test sensor with multiple observations of same entity."""
        config = {
            "binary_sensor": {
                "name": "Test_Binary",
                "platform": "bayesian",
                "observations": [
                    {
                        "platform": "state",
                        "entity_id": "sensor.test_monitor
        self.opman2.start()

        collection = self.mongos_conn["test"]["mcsharded"]
        for i in range(1000):
            collection.insert_one({"i": i + 500})

        # Assert current state of the mongoverse
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(), 500)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(), 500)
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)

        # Test successful chunk move from shard 1 to shard 2
        self.mongos_conn["admin"].command(
            "moveChunk", "test.mcsharded", find={"i": 1}, to="demo-set-1"
        )

        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)

        # Mark the collection as "dropped". This will cause migration to fail.
        self.mongos_conn["config"]["collections"].update_one(
            {"_id": "test.mcsharded"}, {"$set": {"dropped": True}}
        )

        # Test unsuccessful chunk move from shard 2 to shard 1
        def fail_to_move_chunk():
            self.mongos_conn["admin"].command(
                "moveChunk", "test.mcsharded", find={"i": 1}, to="demo-set-0"
            )

        self.assertRaises(pymongo.errors.OperationFailure, fail_to_move_chunk)

        # doc manager should still have all docs
        all_docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(all_docs), 1000)
        for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
            self.assertEqual(doc["i"], i + 500)

    def test_upgrade_oplog_progress(self):
        first_oplog_ts1 = self.opman1.oplog.find_one()["ts"]
        first_oplog_ts2 = self.opman2.oplog.find_one()["ts"]

        # Old format oplog progress file:
        progress = {
            str(self.opman1.oplog): bson_ts_to_long(first_oplog_ts1),
            str(self.opman2.oplog): bson_ts_to_long(first_oplog_ts2),
        }

        # Set up oplog managers to use the old format.
        oplog_progress = LockingDict()
        oplog_progress.dict = progress
        self.opman1.oplog_progress = oplog_progress
        self.opman2.oplog_progress = oplog_progress

        # Cause the oplog managers to update their checkpoints.
        self.opman1.update_checkpoint(first_oplog_ts1)
        self.opman2.update_checkpoint(first_oplog_ts2)

        # New format should be in place now.
        new_format = {
            self.opman1.replset_name: first_oplog_ts1,
            self.opman2.replset_name: first_oplog_ts2,
        }
        self.assertEqual(new_format, self.opman1.oplog_progress.get_dict())
        self.assertEqual(new_format, self.opman2.oplog_progress.get_dict())


class TestOplogManagerSharded(ShardedClusterTestCase):
    """Defines all test cases for OplogThreads running on a sharded
    cluster with three node replica sets.
    """

    def setUp(self):
        self.set_up_sharded_cluster(ShardedCluster)
        self.shard1_secondary_conn = self.cluster.shards[0].secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )
        self.shard2_secondary_conn = self.cluster.shards[1].secondary.client(
            read_preference=ReadPreference.SECONDARY_PREFERRED
        )

    def tearDown(self):
        super(TestOplogManagerSharded, self).tearDown()
        close_client(self.shard1_secondary_conn)
        close_client(self.shard2_secondary_conn)

    def test_with_orphan_documents(self):
        """Test that DocManagers have proper state after a chunk migration
        that results in orphaned documents.
        """
        # Start replicating to dummy doc managers
        self.opman1.start()
        self.opman2.start()

        collection = self.mongos_conn["test"]["mcsharded"]
        collection.insert_many([{"i": i + 500} for i in range(1000)])

        # Assert current state of the mongoverse
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(), 500)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(), 500)
        assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)

        # Stop replication using the 'rsSyncApplyStop' failpoint.
        # Note: this requires secondaries to ensure the subsequent moveChunk
        # command does not complete.
        self.shard1_conn.admin.command(
            "configureFailPoint", "rsSyncApplyStop", mode="alwaysOn"
        )

        # Move a chunk from shard2 to shard1
        def move_chunk():
            try:
                self.mongos_conn["admin"].command(
                    "moveChunk", "test.mcsharded", find={"i": 1000}, to="demo-set-0"
                )
            except pymongo.errors.OperationFailure:
                pass

        # moveChunk will never complete, so use another thread to continue
        mover = threading.Thread(target=move_chunk)
        mover.start()

        # wait for documents to start moving to shard 1
        assert_soon(lambda: self.shard1_conn.test.mcsharded.count() > 500)

        # Get opid for moveChunk command
        operations = self.mongos_conn.test.current_op()
        opid = None
        for op in operations["inprog"]:
            if op.get("query", {}).get("moveChunk"):
                opid = op["opid"]
        if opid is None:
            raise SkipTest(
                "could not find moveChunk operation, cannot test "
                "failed moveChunk"
            )

        # Kill moveChunk with the opid
        if self.mongos_conn.server_info()["versionArray"][:3] >= [3, 1, 2]:
            self.mongos_conn.admin.command("killOp", op=opid)
        else:
            self.mongos_conn["test"]["$cmd.sys.killop"].find_one({"op": opid})

        # Mongo Connector should not become confused by unsuccessful chunk move
        docs = self.opman1.doc_managers[0]._search()
        self.assertEqual(len(docs), 1000)
        self.assertEqual(sorted(d["i"] for d in docs), list(range(500, 1500)))

        self.shard1_conn.admin.command(
            "configureFailPoint", "rsSyncApplyStop", mode="off"
        )

        # cleanup
        mover.join()

    def test_rollback(self):
        """Test the rollback method in a sharded environment

        Cases:
        1. Documents on both shards, rollback on one shard
        2. Documents on both shards, rollback on both shards
        """
        self.opman1.start()
        self.opman2.start()

        # Insert first documents while primaries are up
        db_main = self.mongos_conn["test"]["mcsharded"]
        db_main2 = db_main.with_options(write_concern=WriteConcern(w=2))
        db_main2.insert_one({"i": 0})
        db_main2.insert_one({"i": 1000})
        self.assertEqual(self.shard1_conn["test"]["mcsharded"].count(), 1)
        self.assertEqual(self.shard2_conn["test"]["mcsharded"].count(), 1)

        # Case 1: only one primary goes down, shard1 in this case
        self.cluster.shards[0].primary.stop(destroy=False)

        # Wait for the secondary to be promoted
        shard1_secondary_admin = self.shard1_secondary_conn["admin"]
        assert_soon(lambda: shard1_secondary_admin.command("isMaster")["ismaster"])

        # Insert another document. This will be rolled back later
        def insert_one(doc):
            if not db_main.find_one(doc):
                return db_main.insert_one(doc)
            return True

        assert_soon(
            lambda: retry_until_ok(insert_one, {"i": 1}),
            "could not insert into shard1 with one node down",
        )

        db_secondary1 = self.shard1_secondary_conn["test"]["mcsharded"]
        db_secondary2 = self.shard2_secondary_conn["test"]["mcsharded"]
        self.assertEqual(db_secondary1.count(), 2)

        # Wait for replication on the doc manager
        # Note that both OplogThreads share the same doc manager
        def c():
            return len(self.opman1.doc_managers[0]._search()) == 3

        assert_soon(c, "not all writes were replicated to doc manager", max_tries=120)

        # Kill the new
# This file is part of ZUKS-Controller.
#
# ZUKS-Controller is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZUKS-Controller is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZUKS-Controller. If not, see <http://www.gnu.org/licenses/>.
"""
WSGI config for server project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")

from django.core.wsgi import get_wsgi_application

application = get_wsgi_application()
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

from psycopg2._psycopg import IntegrityError

from odoo.exceptions import UserError, ValidationError
from odoo.tests import common
from odoo.tools import mute_logger


class TestPartnerIdentificationBase(common.TransactionCase):
    def test_create_id_category(self):
        partner_id_category = self.env["res.partner.id_category"].create(
            {"code": "id_code", "name": "id_name"}
        )
        self.assertEqual(partner_id_category.name, "id_name")
        self.assertEqual(partner_id_category.code, "id_code")

    @mute_logger("odoo.sql_db")
    def test_update_partner_with_no_category(self):
        partner_1 = self.env.ref("base.res_partner_1")
        self.assertEqual(len(partner_1.id_numbers), 0)
        # create without required category
        with self.assertRaises(IntegrityError):
            partner_1.write({"id_numbers": [(0, 0, {"name": "1234"})]})

    def test_update_partner_with_category(self):
        partner_1 = self.env.ref("base.res_partner_1")
        partner_id_category = self.env["res.partner.id_category"].create(
            {"code": "new_code", "name": "new_name"}
        )
        # successful creation
        partner_1.write(
            {
                "id_numbers": [
                    (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                ]
            }
        )
        self.assertEqual(len(partner_1.id_numbers), 1)
        self.assertEqual(partner_1.id_numbers.name, "1234")
        # delete
        partner_1.write({"id_numbers": [(5, 0, 0)]})
        self.assertEqual(len(partner_1.id_numbers), 0)


class TestPartnerCategoryValidation(common.TransactionCase):
    def test_partner_id_number_validation(self):
        partner_id_category = self.env["res.partner.id_category"].create(
            {
                "code": "id_code",
                "name": "id_name",
                "validation_code": """
if id_number.name != '1234':
    failed = True
""",
            }
        )
        partner_1 = self.env.ref("base.res_partner_1")
        with self.assertRaises(ValidationError), self.cr.savepoint():
            partner_1.write(
                {
                    "id_numbers": [
                        (0, 0, {"name": "01234", "category_id": partner_id_category.id})
                    ]
                }
            )

        partner_1.write(
            {
                "id_numbers": [
                    (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                ]
            }
        )
        self.assertEqual(len(partner_1.id_numbers), 1)
        self.assertEqual(partner_1.id_numbers.name, "1234")

        partner_id_category2 = self.env["res.partner.id_category"].create(
            {
                "code": "id_code2",
                "name": "id_name2",
                "validation_code": """
if id_number.name != '1235':
    failed = True
""",
            }
        )
        # check that the constraint is also checked when we change the
        # associated category
        with self.assertRaises(ValidationError), self.cr.savepoint():
            partner_1.id_numbers.write({"category_id": partner_id_category2.id})

    def test_bad_validation_code(self):
        partner_id_category = self.env["res.partner.id_category"].create(
            {
                "code": "id_code",
                "name": "id_name",
                "validation_code": """
if id_number.name != '1234'  # missing :
    failed = True
""",
            }
        )
        partner_1 = self.env.ref("base.res_partner_1")
        with self.assertRaises(UserError):
            partner_1.write(
                {
                    "id_numbers": [
                        (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                    ]
                }
            )

    def test_bad_validation_code_override(self):
        """It should allow a bad validation code if context overrides."""
        partner_id_category = self.env["res.partner.id_category"].create(
            {
                "code": "id_code",
                "name": "id_name",
                "validation_code": """
if id_number.name != '1234'  # missing :
    failed = True
""",
            }
        )
        partner_1 = self.env.ref("base.res_partner_1").with_context(id_no_validate=True)
        partner_1.write(
            {
                "id_numbers": [
                    (0, 0, {"name": "1234", "category_id": partner_id_category.id})
                ]
            }
        )
from math import floor, log10


def round_(x, n):
    """Round a float, x, to n significant figures.

    Caution should be applied when performing this operation.
    Significant figures are an implication of precision; arbitrarily
    truncating floats mid-calculation is probably not Good Practice in
    almost all cases.

    Rounding off a float to n s.f. results in a float. Floats are, in
    general, approximations of decimal numbers. The point here is that
    it is very possible to end up with an inexact number:

    >>> round_(0.0012395, 3)
    0.00124
    >>> round_(0.0012315, 3)
    0.0012300000000000002

    Basically, rounding in this way probably doesn't do what you want
    it to.
    """
    n = int(n)
    x = float(x)

    if x == 0:
        return 0

    e = floor(log10(abs(x)) - n + 1)  # exponent, 10 ** e
    shifted_dp = x / (10 ** e)  # decimal place shifted n d.p.
    return round(shifted_dp) * (10 ** e)  # round and revert


def string(x, n):
    """Convert a float, x, to a string with n significant figures.

    This function returns a decimal string representation of a float
    to a specified number of significant figures.

    >>> string(9.80665, 3)
    '9.81'
    >>> string(0.0120076, 3)
    '0.0120'
    >>> string(100000, 5)
    '100000'

    Note the last representation is, without context, ambiguous. This
    is a good reason to use scientific notation, but it's not always
    appropriate.

    Note
    ----

    Performing this operation as a set of string operations arguably
    makes more sense than a mathematical operation conceptually. It's
    the presentation of the number that is being changed here, not the
    number itself (which is in turn only approximated by a float).
    """
    n = int(n)
    x = float(x)

    if n < 1:
        raise ValueError("1+ significant digits required.")

    # retrieve the significand and exponent from the S.N. form
    s, e = ''.join(('{:.', str(n - 1), 'e}')).format(x).split('e')
    e = int(e)  # might as well coerce now

    if e == 0:
        # Significand requires no adjustment
        return s

    s = s.replace('.', '')
    if e < 0:
        # Placeholder zeros need creating
        return ''.join(('0.', '0' * (abs(e) - 1), s))
    else:
        # Decimal place needs shifting
        s += '0' * (e - n + 1)  # s now has correct s.f.
        i = e + 1
        sep = ''
        if i < n:
            sep = '.'
        if s[0] == '-':
            i += 1
        return sep.join((s[:i], s[i:]))


def scientific(x, n):
    """Represent a float in scientific notation.

    This function is merely a wrapper around the 'e' type flag in the
    formatting specification.
    """
    n = int(n)
    x = float(x)

    if n < 1:
        raise ValueError("1+ significant digits required.")

    return ''.join(('{:.', str(n - 1), 'e}')).format(x)


def general(x, n):
    """Represent a float in general form.

    This function is merely a wrapper around the 'g' type flag in the
    formatting specification.
    """
    n = int(n)
    x = float(x)

    if n < 1:
        raise ValueError("1+ significant digits required.")

    return ''.join(('{:#.', str(n), 'g}')).format(x)
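# Quick usage sketch (assumed, not part of the original module); the expected
# values follow from the docstrings and format flags above.
#   round_(0.0012395, 3)      ->  0.00124
#   string(9.80665, 3)        ->  '9.81'
#   scientific(0.0120076, 3)  ->  '1.20e-02'
#   general(0.0120076, 3)     ->  '0.0120'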
# Copyright 2021 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).

from . import models
"""Utilities for extracting macros and preprocessor definitions from C files. Depends on Clang's python bindings. Note that cursors have children, which are also cursors. They are not iterators, they are nodes in a tree. Everything here uses iterators. The general strategy is to have multiple passes over the same cursor to extract everything needed, and this entire file can be viewed as filters over raw cursors.""" import itertools import clang.cindex as cindex import re from . flatten_cursor import flatten_cursor from .extracted_features import Macro def extract_preprocessor_cursors(cursor): """Get all preprocessor definitions from a cursor.""" for i in flatten_cursor(cursor): if i.kind.is_preprocessing(): yield i def extract_macro_cursors(c): """Get all macros from a cursor.""" return itertools.ifilter(lambda x: x.kind == cindex.CursorKind.MACRO_DEFINITION, extract_preprocessor_cursors(c)) def transform_token(token): """Returns a string representation of token. If it is a C numeric constant, it is transformed into a python numeric constant.""" #these are from python docs. find_float = "[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?" find_int = "[-+]?(0[xX][\dA-Fa-f]+|0[0-7]*|\d+)" untransformed_string = token.spelling try_find_int = re.match(find_int, untransformed_string) try_find_float = re.match(find_float, untransformed_string) new_string = untransformed_string if try_find_int is not None: new_string = try_find_int.group() elif try_find_float is not None: new_string = try_find_float.group() return new_string def extract_macros(c): """Uses eval and some regexp magic and general hackiness to extract as many macros as it possibly can. Returns a tuple. The first element is a
list of Macro objects;
the second is a list of strings that name macros we couldn't handle.""" handled_macros = [] currently_known_macros = dict() failed_macros = [] possible_macro_cursors = extract_macro_cursors(c) #begin the general awfulness. for i in possible_macro_cursors: desired_tokens = list(i.get_tokens())[:-1] #the last one is something we do not need. name_token = desired_tokens[0] name = name_token.spelling desired_tokens = desired_tokens[1:] if len(desired_tokens) == 0: #the value of this macro is none. value = None m = Macro(name = name, value = value, cursor = i) handled_macros.append(m) currently_known_macros[m.name] = m.value continue #otherwise, we have to do some hacky stuff. token_strings = [transform_token(j) for j in desired_tokens] eval_string = "".join(token_strings) try: value = eval(eval_string, currently_known_macros) if isinstance(value, type): raise ValueError("Value resolved to class, not instance.") except: failed_macros.append(name) continue m = Macro(value = value, name = name, cursor = i) handled_macros.append(m) currently_known_macros[m.name] = m.value return (handled_macros, failed_macros)
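# Usage sketch (assumed, not part of the original module): parse a header with
# libclang while keeping preprocessing records in the translation unit, then
# hand the root cursor to extract_macros().  The file name and options here
# are illustrative assumptions.
#
#   index = cindex.Index.create()
#   tu = index.parse("example.h",
#                    options=cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
#   macros, failed = extract_macros(tu.cursor)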
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.preprocess import ApplyVolTransform


def test_ApplyVolTransform_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        fs_target=dict(argstr='--fstarg', mandatory=True, requires=['reg_file'],
                       xor=('target_file', 'tal', 'fs_target')),
        fsl_reg_file=dict(argstr='--fsl %s', mandatory=True,
                          xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')),
        ignore_exception=dict(nohash=True, usedefault=True),
        interp=dict(argstr='--interp %s'),
        inverse=dict(argstr='--inv'),
        invert_morph=dict(argstr='--inv-morph', requires=['m3z_file']),
        m3z_file=dict(argstr='--m3z %s'),
        no_ded_m3z_path=dict(argstr='--noDefM3zPath', requires=['m3z_file']),
        no_resample=dict(argstr='--no-resample'),
        reg_file=dict(argstr='--reg %s', mandatory=True,
                      xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')),
        reg_header=dict(argstr='--regheader', mandatory=True,
                        xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')),
        source_file=dict(argstr='--mov %s', copyfile=False, mandatory=True),
        subject=dict(argstr='--s %s', mandatory=True,
                     xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')),
        subjects_dir=dict(),
        tal=dict(argstr='--tal', mandatory=True, xor=('target_file', 'tal', 'fs_target')),
        tal_resolution=dict(argstr='--talres %.10f'),
        target_file=dict(argstr='--targ %s', mandatory=True, xor=('target_file', 'tal', 'fs_target')),
        terminal_output=dict(nohash=True),
        transformed_file=dict(argstr='--o %s', genfile=True),
        xfm_reg_file=dict(argstr='--xfm %s', mandatory=True,
                          xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject')),
    )
    inputs = ApplyVolTransform.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_ApplyVolTransform_outputs():
    output_map = dict(transformed_file=dict())
    outputs = ApplyVolTransform.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2011 credativ Ltd (<http://www.credativ.co.uk>).
#    All Rights Reserved
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import bcr_format

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
#pragma error
#pragma repy

global foo

foo = 2
import re

LINE_RE = re.compile(r'\s*namer.add\("(.*)", 0x(.*)\);.*')

with open('/tmp/colors.txt') as f:
    data = {}
    for line in f:
        matches = LINE_RE.match(line)
        if matches:
            color, number = matches.groups()
            if len(number) < 8:
                number = 'ff%s' % number
            data[color] = number
        else:
            print 'ERROR: don\'t understand:', line

inverse = {}
dupes = {}
for color, number in sorted(data.iteritems()):
    if number in inverse:
        dupes.setdefault(number, []).append(color)
    else:
        inverse[number] = color
        print ' namer.add("%s", 0x%s);' % (color, number)

if dupes:
    print dupes
    for number, colors in dupes.iteritems():
        print '%s -> %s (originally %s)' % (number, colors, inverse[number])
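# Example of an input line this script parses (assumed sample, matching
# LINE_RE above; the color name and hex value are illustrative only):
#   namer.add("aliceblue", 0xfff0f8ff);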
            , COALESCE(l_out.p_r, 0) AS 'out_packets_received'
            , COALESCE(l_out.sum_duration * 1.0 / l_out.total_out, 0) AS 'out_duration'
            , COALESCE(l_in.unique_in_ip, 0) AS 'unique_in_ip'
            , COALESCE(l_in.unique_in_conn, 0) AS 'unique_in_conn'
            , COALESCE(l_in.total_in, 0) AS 'total_in'
            , COALESCE(l_in.b_s, 0) AS 'in_bytes_sent'
            , COALESCE(l_in.b_r, 0) AS 'in_bytes_received'
            , COALESCE(l_in.max_bps, 0) AS 'in_max_bps'
            , COALESCE(l_in.sum_b * 1.0 / l_in.sum_duration, 0) AS 'in_avg_bps'
            , COALESCE(l_in.p_s, 0) AS 'in_packets_sent'
            , COALESCE(l_in.p_r, 0) AS 'in_packets_received'
            , COALESCE(l_in.sum_duration * 1.0 / l_in.total_in, 0) AS 'in_duration'
            , COALESCE(l_in.ports_used, 0) AS 'ports_used'
            , children.endpoints AS 'endpoints'
            , COALESCE(t.seconds, 0) + 300 AS 'seconds'
            , (COALESCE(l_in.sum_b, 0) + COALESCE(l_out.sum_b, 0)) / (COALESCE(t.seconds, 0) + 300) AS 'overall_bps'
            , COALESCE(l_in.protocol, "") AS 'in_protocols'
            , COALESCE(l_out.protocol, "") AS 'out_protocols'
        FROM (
            SELECT ipstart, subnet, alias AS 'hostname'
            FROM {nodes_table}
            WHERE ipstart = $start AND ipend = $end
        ) AS n
        LEFT JOIN (
            SELECT $start AS 's1'
                , COUNT(DISTINCT dst) AS 'unique_out_ip'
                , (SELECT COUNT(1)
                   FROM (SELECT DISTINCT src, dst, port
                         FROM {links_table}
                         WHERE src BETWEEN $start AND $end) AS `temp1`) AS 'unique_out_conn'
                , SUM(links) AS 'total_out'
                , SUM(bytes_sent) AS 'b_s'
                , SUM(bytes_received) AS 'b_r'
                , MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
                , SUM(bytes_sent + bytes_received) AS 'sum_b'
                , SUM(packets_sent) AS 'p_s'
                , SUM(packets_received) AS 'p_r'
                , SUM(duration * links) AS 'sum_duration'
                , GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
            FROM {links_table}
            WHERE src BETWEEN $start AND $end
            GROUP BY 's1'
        ) AS l_out
            ON n.ipstart = l_out.s1
        LEFT JOIN (
            SELECT $start AS 's1'
                , COUNT(DISTINCT src) AS 'unique_in_ip'
                , (SELECT COUNT(1)
                   FROM (SELECT DISTINCT src, dst, port
                         FROM {links_table}
                         WHERE dst BETWEEN $start AND $end) AS `temp2`) AS 'unique_in_conn'
                , SUM(links) AS 'total_in'
                , SUM(bytes_sent) AS 'b_s'
                , SUM(bytes_received) AS 'b_r'
                , MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
                , SUM(bytes_sent + bytes_received) AS 'sum_b'
                , SUM(packets_sent) AS 'p_s'
                , SUM(packets_received) AS 'p_r'
                , SUM(duration * links) AS 'sum_duration'
                , COUNT(DISTINCT port) AS 'ports_used'
                , GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
            FROM {links_table}
            WHERE dst BETWEEN $start AND $end
            GROUP BY 's1'
        ) AS l_in
            ON n.ipstart = l_in.s1
        LEFT JOIN (
            SELECT $start AS 's1'
                , COUNT(ipstart) AS 'endpoints'
            FROM {nodes_table}
            WHERE ipstart = ipend AND ipstart BETWEEN $start AND $end
        ) AS children
            ON n.ipstart = children.s1
        LEFT JOIN (
            SELECT $start AS 's1'
                , {elapsed} AS 'seconds'
            FROM {links_table}
            GROUP BY 's1'
        ) AS t
            ON n.ipstart = t.s1
        LIMIT 1;
        """.format(
            address_q=sam.common.db_concat(self.db, 'decodeIP(n.ipstart)', "'/'", 'n.subnet'),
            elapsed=self.elapsed,
            nodes_table=self.table_nodes,
            links_table=self.table_links)
        results = self.db.query(query, vars=qvars)
        first = results.first()
        if first:
            return first
        else:
            return {}

    def build_where_clause(self, timestamp_range=None, port=None, protocol=None, rounding=True):
        """
        Build a WHERE SQL clause that covers basic timerange, port, and protocol filtering.

        :param timestamp_range: start and end times as unix timestamps (integers).
            Default is all time.
        :type timestamp_range: tuple[int, int]
        :param port: exclusively report traffic destined for this port, if specified.
        :type port: int or str
        :param protocol: exclusively report traffic using this protocol
        :type protocol: str
        :param rounding: round each time stamp to the nearest quantization mark.
            (db records are quantized for conciseness)
        :type rounding: bool
        :return: String SQL clause
        :rtype: str
        """
        clauses = []
        t_start = 0
        t_end = 0

        if timestamp_range:
            t_start = timestamp_range[0]
            t_end = timestamp_range[1]
            if rounding:
                # rounding to 5 minutes, for use with the Syslog table
                if t_start > 150:
                    t_start -= 150
                if t_end <= 2 ** 31 - 150:
                    t_end += 149
            if self.db.dbname == 'sqlite':
                clauses.append("timestamp BETWEEN $tstart AND $tend")
            else:
                clauses.append("timestamp BETWEEN FROM_UNIXTIME($tstart) AND FROM_UNIXTIME($tend)")

        if port:
            clauses.append("port = $port")

        if protocol:
            clauses.append("protocols LIKE $protocol")
            protocol = "%{0}%".format(protocol)

        qvars = {'tstart': t_start,
                 'tend': t_end,
                 'port': port,
                 'protocol': protocol}
        where = str(web.db.reparam("\n AND ".join(clauses), qvars))
        if where:
            where = " AND " + where
        return where

    def get_details_connections(self, inbound, page=1, order="-links", simple=False):
        sort_options = ['links', 'src', 'dst', 'port', 'sum_bytes', 'sum_packets', 'protocols', 'avg_duration']
        sort_options_simple = ['links', 'src', 'dst', 'port']
        qvars = {
            'table_links': self.table_links,
            'start': self.ip_start,
            'end': self.ip_end,
            'page': self.page_size * (page - 1),
            'page_size': self.page_size,
            'WHERE': self.build_where_clause(self.time_range, self.port)
        }
        if inbound:
            qvars['collected'] = "src"
            qvars['filtered'] = "dst"
        else:
            qvars['filtered'] = "src"
            qvars['collected'] = "dst"

        # determine the sort direction
        if order and order[0] == '-':
            sort_dir = "DESC"
        else:
            sort_dir = "ASC"

        # determine the sort column
        if simple:
            if order and order[1:] in sort_options_simple:
                sort_by = order[1:]
            else:
                sort_by = sort_options_simple[0]
        else:
            if order and order[1:] in sort_options:
                sort_by = order[1:]
            else:
                sort_by = sort_options[0]

        # add table prefix for some columns
        if sort_by in ['port', 'src', 'dst']:
            sort_by = "`links`." + sort_by

        qvars['order'] = "{0} {1}".format(sort_by, sort_dir)

        if simple:
            query = """
            SELECT decodeIP({collected}) AS '{collected}'
                , port AS 'port'
                , sum(links) AS 'links'
            FROM {table_links} AS `links`
            WHERE {filtered} BETWEEN $start AND $end
             {WHERE}
            GROUP BY `links`.{collected}, `links`.port
            ORDER BY {order}
            LIMIT {page}, {page_size}
            """.format(**qvars)
        else:
            query = """
            SELECT src, dst, port, links, protocols
                , sum_bytes
                , (sum_bytes / links) AS 'avg_byte
#!/usr/bin/env python
"""
This file transfer example demonstrates a couple of things:

 1) Transferring files using Axolotl to encrypt each block of the transfer
    with a different ephemeral key.
 2) Using a context manager with Axolotl.

The utility will prompt you for the location of the Axolotl key database and
the blocksize. The blocksize must be chosen so that the maximum number of
blocks is <= 255. Security is optimized by a larger number of blocks, and
transfer speed is optimized by a smaller number of blocks. If you choose
incorrectly, the utility will prompt you with a recommendation.

Key databases can be generated using e.g the init_conversation.py utility.

Syntax for receive is: ./transfer.py -r
Syntax for send is: ./transfer.py -s <filename> <target hostname or ip address>

The end of packet (EOP) and end of file (EOF) markers I use are pretty simple,
but unlikely to show up in ciphertext.
"""

from pyaxo import Axolotl
from contextlib import contextmanager
import sys
import socket
import os

try:
    location = raw_input('Database directory (default ~/.bin)? ').strip()
    if location == '':
        location = '~/.bin'
    location = os.path.expanduser(location)
    if sys.argv[1] == '-s':
        file_name = sys.argv[2]
        host = sys.argv[3]
        size = int(raw_input('File transfer block size? '))
    port = 50000
except IndexError:
    print 'Usage: ' + sys.argv[0] + ' -(s,r) [<filename> <host>]'
    exit()

backlog = 1


@contextmanager
def socketcontext(*args, **kwargs):
    s = socket.socket(*args, **kwargs)
    yield s
    s.close()


@contextmanager
def axo(my_name, other_name, dbname, dbpassphrase):
    a = Axolotl(my_name, dbname=dbname, dbpassphrase=dbpassphrase)
    a.loadState(my_name, other_name)
    yield a
    a.saveState()


if sys.argv[1] == '-s':
    # open socket and send data
    with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.connect((host, port))
        with axo('send', 'receive', dbname=location+'/send.db', dbpassphrase='1') as a:
            with open(file_name, 'rb') as f:
                plaintext = f.read()
            plainlength = len(plaintext)
            while plainlength/size > 253:
                print 'File too large to transfer - increase size parameter'
                print 'Recommended >= ' + str(plainlength/128) + ' bytes per block'
                size = int(raw_input('File transfer block size? '))
            plaintext = str(len(file_name)).zfill(2) + file_name + plaintext
            while len(plaintext) > size:
                msg = plaintext[:size]
                if msg == '':
                    break
                plaintext = plaintext[size:]
                ciphertext = a.encrypt(msg)
                s.send(ciphertext + 'EOP')
            if len(plaintext) != 0:
                ciphertext = a.encrypt(plaintext)
                s.send(ciphertext + 'EOF')
            # receive confirmation
            confirmation = s.recv(1024)
            if a.decrypt(confirmation) == 'Got It!':
                print 'Transfer confirmed!'
            else:
                print 'Transfer not confirmed...'

if sys.argv[1] == '-r':
    # open socket and receive data
    with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        host = ''
        s.bind((host, port))
        s.listen(backlog)
        client, address = s.accept()
        with axo('receive', 'send', dbname=location+'/receive.db', dbpassphrase='1') as a:
            plaintext = ''
            ciphertext = ''
            while True:
                newtext = client.recv(1024)
                ciphertext += newtext
                if ciphertext[-3:] == 'EOF':
                    break
            if ciphertext == '':
                print 'nothing received'
                exit()
            cipherlist = ciphertext.split('EOP')
            for item in cipherlist:
                if item[-3:] == 'EOF':
                    item = item[:-3]
                plaintext += a.decrypt(item)
            filenamelength = int(plaintext[:2])
            file_name = plaintext[2:2+filenamelength]
            with open(file_name, 'wb') as f:
                f.write(plaintext[2+filenamelength:])
            # send confirmation
            reply = a.encrypt('Got It!')
            client.send(reply)
            print file_name + ' received'
instruction_context = InstructionContext()


class InstructionFormat(object):
    """
    Every instruction opcode has a corresponding instruction format which
    determines the number of operands and their kinds. Instruction formats
    are identified structurally, i.e., the format of an instruction is
    derived from the kinds of operands used in its declaration.

    The instruction format stores two separate lists of operands: Immediates
    and values. Immediate operands (including entity references) are
    represented as explicit members in the `InstructionData` variants. The
    value operands are stored differently, depending on how many there are.
    Beyond a certain point, instruction formats switch to an external value
    list for storing value arguments. Value lists can hold an arbitrary
    number of values.

    All instruction formats must be predefined in the
    :py:mod:`cranelift.formats` module.

    :param kinds: List of `OperandKind` objects describing the operands.
    :param name: Instruction format name in CamelCase. This is used as a Rust
        variant name in both the `InstructionData` and `InstructionFormat`
        enums.
    :param typevar_operand: Index of the value input operand that is used to
        infer the controlling type variable. By default, this is `0`, the
        first `value` operand. The index is relative to the values only,
        ignoring immediate operands.
    """

    # Map (imm_kinds, num_value_operands) -> format
    _registry = dict()  # type: Dict[Tuple[Tuple[OperandKind, ...], int, bool], InstructionFormat]  # noqa

    # All existing formats.
    all_formats = list()  # type: List[InstructionFormat]

    def __init__(self, *kinds, **kwargs):
        # type: (*Union[OperandKind, Tuple[str, OperandKind]], **Any) -> None  # noqa
        self.name = kwargs.get('name', None)  # type: str
        self.parent = instruction_context

        # The number of value operands stored in the format, or `None` when
        # `has_value_list` is set.
        self.num_value_operands = 0
        # Does this format use a value list for storing value operands?
        self.has_value_list = False
        # Operand fields for the immediate operands. All other instruction
        # operands are values or variable argument lists. They are all handled
        # specially.
        self.imm_fields = tuple(self._process_member_names(kinds))

        # The typevar_operand argument must point to a 'value' operand.
        self.typevar_operand = kwargs.get('typevar_operand', None)  # type: int
        if self.typevar_operand is not None:
            if not self.has_value_list:
                assert self.typevar_operand < self.num_value_operands, \
                    "typevar_operand must indicate a 'value' operand"
        elif self.has_value_list or self.num_value_operands > 0:
            # Default to the first 'value' operand, if there is one.
            self.typevar_operand = 0

        # Compute a signature for the global registry.
        imm_kinds = tuple(f.kind for f in self.imm_fields)
        sig = (imm_kinds, self.num_value_operands, self.has_value_list)
        if sig in InstructionFormat._registry:
            raise RuntimeError(
                "Format '{}' has the same signature as existing format '{}'"
                .format(self.name, InstructionFormat._registry[sig]))
        InstructionFormat._registry[sig] = self
        InstructionFormat.all_formats.append(self)

    def args(self):
        # type: () -> FormatField
        """
        Provides a ValueListField, which is derived from FormatField,
        corresponding to the full ValueList of the instruction format. This
        is useful for creating predicates for instructions which use variadic
        arguments.
        """
        if self.has_value_list:
            return ValueListField(self)
        return None

    def _process_member_names(self, kinds):
        # type: (Sequence[Union[OperandKind, Tuple[str, OperandKind]]]) -> Iterable[FormatField]  # noqa
        """
        Extract names of all the immediate operands in the kinds tuple.

        Each entry is either an `OperandKind` instance, or a `(member, kind)`
        pair. The member names correspond to members in the Rust
        `InstructionData` data structure.

        Updates the fields `self.num_value_operands` and
        `self.has_value_list`.

        Yields the immediate operand fields.
        """
        inum = 0
        for arg in kinds:
            if isinstance(arg, OperandKind):
                member = arg.default_member
                k = arg
            else:
                member, k = arg

            # We define 'immediate' as not a value or variable arguments.
            if k is VALUE:
                self.num_value_operands += 1
            elif k is VARIABLE_ARGS:
                self.has_value_list = True
            else:
                yield FormatField(self, inum, k, member)
                inum += 1

    def __str__(self):
        # type: () -> str
        args = ', '.join(
            '{}: {}'.format(f.member, f.kind) for f in self.imm_fields)
        return '{}(imms=({}), vals={})'.format(
            self.name, args, self.num_value_operands)

    def __getattr__(self, attr):
        # type: (str) -> FormatField
        """
        Make immediate instruction format members available as attributes.

        Each non-value format member becomes a corresponding `FormatField`
        attribute.
        """
        for f in self.imm_fields:
            if f.member == attr:
                # Cache this field attribute so we won't have to search again.
                setattr(self, attr, f)
                return f
        raise AttributeError(
            '{} is neither a {} member or a '.format(attr, self.name) +
            'normal InstructionFormat attribute')

    @staticmethod
    def lookup(ins, outs):
        # type: (Sequence[Operand], Sequence[Operand]) -> InstructionFormat
        """
        Find an existing instruction format that matches the given lists of
        instruction inputs and outputs.

        The `ins` and `outs` arguments correspond to the
        :py:class:`Instruction` arguments of the same name, except they must
        be tuples of :py:`Operand` objects.
        """
        # Construct a signature.
        imm_kinds = tuple(op.kind for op in ins if op.is_immediate())
        num_values = sum(1 for op in ins if op.is_value())
        has_varargs = (VARIABLE_ARGS in tuple(op.kind for op in ins))

        sig = (imm_kinds, num_values, has_varargs)
        if sig in InstructionFormat._registry:
            return InstructionFormat._registry[sig]

        # Try another value list format as an alternative.
        sig = (imm_kinds, 0, True)
        if sig in InstructionFormat._registry:
            return InstructionFormat._registry[sig]

        raise RuntimeError(
            'No instruction format matches '
            'imms={}, vals={}, varargs={}'.format(
                imm_kinds, num_values, has_varargs))

    @staticmethod
    def extract_names(globs):
        # type: (Dict[str, Any]) -> None
        """
        Given a dict mapping name -> object as returned by `globals()`, find
        all the InstructionFormat objects and set their name from the dict
        key.

        This is used to name a bunch of global values in a module.
        """
        for name, obj in globs.items():
            if isinstance(obj, InstructionFormat):
                assert obj.name is None
                obj.name = name


class FormatField(object):
    """
    An immediate field in an instruction format.

    This corresponds to a single member of a variant of the `InstructionData`
    data type.

    :param iform: Parent `InstructionFormat`.
    :param immnum: Immediate operand number in parent.
    :param kind: Immediate Operand kind.
    :param member: Member name in `InstructionData` variant.
    """

    def __init__(self, iform, immnum, kind, member):
        # type: (InstructionFormat, int, OperandKind, str) -> None
        self.format = iform
        self.immnum = immnum
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'tjtest',  # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '127.0.0.1',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432',  # Set to empty string for default.
    }
}

# Make this unique, and don't share it with anybody.
SECRET_KEY = ''

DEBUG = False

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['tjugovich.webfactional.com']

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/home/tjugovich/webapps/test_static'
""".. Ignore pydocstyle D400. =============== Signal Handlers =============== """ from asgiref.sync import async_to_sync from django.conf import settings from django.db import transaction from django.db.models.signals import post_delete, post_save from django.dispatch import receiver from resolwe.flow.managers import manager from resolwe.flow.models import Data, Relation from resolwe.flow.models.entity import RelationPartition def commit_signal(data_id): """Nudge manager at the end of every Data object save event.""" if not getattr(settings, "FLOW_MANAGER_DISABLE_AUTO_CALLS", False): immediate = getattr(settings, "FLOW_MANAGER_SYNC_AUTO_CALLS", False) async_to_sync(manager.communicate)(data_id=data_id, run_sync=immediate) @receiver(post_save, sender=Data) def manager_post_save_handler(sender, instance, created, **kwargs): """Run newly created (spawned) processes."""
if ( instance.status == Data.STATUS_DONE or instance.status == Data.STATUS_ERROR or created ): # Run manager at the end of the potential transaction. Otherwise # tasks are send to workers before transaction ends and therefore # workers
cannot access objects created inside transaction. transaction.on_commit(lambda: commit_signal(instance.id)) # NOTE: m2m_changed signal cannot be used because of a bug: # https://code.djangoproject.com/ticket/17688 @receiver(post_delete, sender=RelationPartition) def delete_relation(sender, instance, **kwargs): """Delete the Relation object when the last Entity is removed.""" def process_signal(relation_id): """Get the relation and delete it if it has no entities left.""" try: relation = Relation.objects.get(pk=relation_id) except Relation.DoesNotExist: return if relation.entities.count() == 0: relation.delete() # Wait for partitions to be recreated. transaction.on_commit(lambda: process_signal(instance.relation_id))
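# Sketch of the settings consulted above (names taken from the getattr()
# calls; the values shown are illustrative assumptions, not project defaults):
#   FLOW_MANAGER_DISABLE_AUTO_CALLS = False  # True silences the auto manager calls
#   FLOW_MANAGER_SYNC_AUTO_CALLS = False     # True makes communicate() run synchronously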
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.

from mongodict import MongoDict
from nltk import word_tokenize, sent_tokenize

from pypln.backend.celery_task import PyPLNTask


class Tokenizer(PyPLNTask):
    def process(self, document):
        text = document['text']
        tokens = word_tokenize(text)
        sentences = [word_tokenize(sent) for sent in sent_tokenize(text)]
        return {'tokens': tokens, 'sentences': sentences}
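# Usage sketch (assumed, not part of the original module): process() only
# needs a dict with a 'text' key and returns flat tokens plus per-sentence
# token lists; exact tokenization depends on the NLTK models installed.
#   Tokenizer().process({'text': 'Hello world. Bye.'})
#     -> {'tokens': ['Hello', 'world', '.', 'Bye', '.'],
#         'sentences': [['Hello', 'world', '.'], ['Bye', '.']]}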
FTP_SERVER = "stage.mozilla.org" FTP_USER = "ffxbld" FTP_SSH_KEY = "~/.ssh/ffxbld_dsa" FTP_UPLOAD_BASE_DIR = "/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d" DOWNLOAD_BASE_URL = "http://%s%s" % (FTP_SERVER, FTP_UPLOAD_BASE_DIR) APK_BASE_NAME = "fennec-%(version)s.%(locale)s.android-arm.apk" HG_SHARE_BASE_DIR = "/builds/hg-shared" KEYSTORE = "/home/cltsign/.android/android-release.keystore" KEY_ALIAS = "release" config = { "log_name": "partner_repack", "locales_file": "buildbot-configs/mozilla/l10n-changesets_mobile-release.json", "additional_locales": ['en-US'], "platforms": ["android"], "repos": [{ "repo": "http://hg.mozilla.org/build/buildbot-configs", "revision": "default", }], 'vcs_share_base': HG_SHARE_BASE_DIR, "ftp_upload_base_dir": FTP_UPLOAD_BASE_DIR, "ftp_ssh_key": FTP_SSH_KEY, "ftp_user": FTP_USER, "ftp_server": FTP_SERVER, "installer_base_names": { "android": APK_BASE_NAME, }, "partner_config": { "google-play": {}, }, "download_unsigned_base_subdir": "unsigned/%(platform)s/%(locale)s", "download_base_url": DOWNLOAD_BASE_URL, "release_config_file": "buildbot-configs/mozilla/release-fennec-mozilla-re
lease.py", "default_actions": ["clobber", "pull", "download", "repack", "upload-unsigned-bits"], # signing (optional) "keystore": KEYSTORE, "key_alias": KEY_ALIAS, "exes": { "jarsigner": "/tools/jdk-1.6.0_17/bin/jarsigner", "zipalign": "/tools/android-sdk-r8/tools/zipalign", }, }
""" Implements compartmental model of a passive cable. See Neuronal Dynamics `Chapter 3 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch3.S2.html>`_ """ # This file is part of the exercise code repository accompanying # the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch) # located at http://github.com/EPFL-LCN/neuronaldynamics-exercises. # This free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License 2.0 as published by the # Free Software Foundation. You should have received a copy of the # GNU General Public License along with the repository. If not, # see http://www.gnu.org/licenses/. # Should you reuse and publish the code for your own purposes, # please cite the book or point to the webpage http://neuronaldynamics.epfl.ch. # Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski. # Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition. # Cambridge University Press, 2014. import brian2 as b2 from neurodynex3.tools import input_factory import matplotlib.pyplot as plt import numpy as np # integration time step in milliseconds b2.defaultclock.dt = 0.01 * b2.ms # DEFAULT morphological and electrical parameters CABLE_LENGTH = 500. * b2.um # length of dendrite CABLE_DIAMETER = 2. * b2.um # diameter of dendrite R_LONGITUDINAL = 0.5 * b2.kohm * b2.mm # Intracellular medium resistance R_TRANSVERSAL = 1.25 * b2.Mohm * b2.mm ** 2 # cell membrane resistance (->leak current) E_LEAK = -70. * b2.mV # reversal potential of the leak current (-> resting potential) CAPACITANCE = 0.8 * b2.uF / b2.cm ** 2 # membrane capacitance DEFAULT_INPUT_CURRENT = input_factory.get_step_current(2000, 3000, unit_time=b2.us, amplitude=0.2 * b2.namp) DEFAULT_INPUT_LOCATION = [CABLE_LENGTH / 3] # provide an array of locations # print("Membrane Timescale = {}".format(R_TRANSVERSAL*CAPACITANCE)) def simulate_passive_cable(current_injection_location=DEFAULT_INPUT_LOCATION, input_current=DEFAULT_INPUT_CURRENT, length=CABLE_LENGTH, diameter=CABLE_DIAMETER, r_longitudinal=R_LONGITUDINAL, r_transversal=R_TRANSVERSAL, e_leak=E_LEAK, initial_voltage=E_LEAK, capacitance=CAPACITANCE, nr_compartments=200, simulation_time=5 * b2.ms): """Builds a multicompartment cable and numerically approximates the cable equation. Args: t_spikes (int): list of spike times current_injection_location (list): List [] of input locations (Quantity, Length): [123.*b2.um] input_current (TimedArray): TimedArray of current amplitudes. One column per current_injection_location. length (Quantity): Length of the cable: 0.8*b2.mm diameter (Quantity): Diameter of the cable: 0.2*b2.um r_longitudinal (Quantity): The longitudinal (axial) resistance of the cable: 0.5*b2.kohm*b2.mm r_transversal (Quantity): The transversal resistance (=membrane resistance): 1.25*b2.Mohm*b2.mm**2 e_leak (Quantity): The reversal potential of the leak current (=resting potential): -70.*b2.mV initial_voltage (Quantity): Value of the potential at t=0: -70.*b2.mV capacitance (Quantity): Membrane capacitance: 0.8*b2.uF/b2.cm**2 nr_compartments (int): Number of compartments. Spatial discretization: 200 simulation_time (Quantity): Time for which the dynamics are simulated: 5*b2.ms Returns: (StateMonitor, SpatialNeuron): The state monitor contains the membrane voltage in a Time x Location matrix. The SpatialNeuron object specifies the simulated neuron model and gives access to the morphology. You may want to use those objec
ts for spatial indexing: myVoltageStateMonitor[mySpatialNeuron.morphology[0.123*b2.um]].v """ assert isinstance(input_current, b2.TimedArray), "input_current is not of type TimedArray" assert input_current.values.shape[1] == len(current_injection_location),\ "number of injection_locations does not match nr of input currents" cable_morphology = b2.Cylinder(diameter=diameter, length=length, n=nr_compartments) # Im is transmembrane current # Iext is injected curren
t at a specific position on dendrite EL = e_leak RT = r_transversal eqs = """ Iext = current(t, location_index): amp (point current) location_index : integer (constant) Im = (EL-v)/RT : amp/meter**2 """ cable_model = b2.SpatialNeuron(morphology=cable_morphology, model=eqs, Cm=capacitance, Ri=r_longitudinal) monitor_v = b2.StateMonitor(cable_model, "v", record=True) # inject all input currents at the specified location: nr_input_locations = len(current_injection_location) input_current_0 = np.insert(input_current.values, 0, 0., axis=1) * b2.amp # insert default current: 0. [amp] current = b2.TimedArray(input_current_0, dt=input_current.dt * b2.second) for current_index in range(nr_input_locations): insert_location = current_injection_location[current_index] compartment_index = int(np.floor(insert_location / (length / nr_compartments))) # next line: current_index+1 because 0 is the default current 0Amp cable_model.location_index[compartment_index] = current_index + 1 # set initial values and run for 1 ms cable_model.v = initial_voltage b2.run(simulation_time) return monitor_v, cable_model def getting_started(): """A simple code example to get started. """ current = input_factory.get_step_current(500, 510, unit_time=b2.us, amplitude=3. * b2.namp) voltage_monitor, cable_model = simulate_passive_cable( length=0.5 * b2.mm, current_injection_location=[0.1 * b2.mm], input_current=current, nr_compartments=100, simulation_time=2 * b2.ms) # provide a minimal plot plt.figure() plt.imshow(voltage_monitor.v / b2.volt) plt.colorbar(label="voltage") plt.xlabel("time index") plt.ylabel("location index") plt.title("vm at (t,x), raw data voltage_monitor.v") plt.show() if __name__ == "__main__": getting_started()
import numpy as np import bisect import pygame import scipy.signal from albow.widget import Widget, overridable_property from albow.theme import ThemeProperty class SignalRendererWidget(Widget): def __init__(self, signal_list, dev, buf, rect, **kwds): """ Initialize the renderer with the signal_name to index mapping (always all 14 signals). The measurement device, the signal buffer and the rectangle into which the signals are to be rendered. To select shown signals, use select_channels. """ Widget.__init__(self, rect, **kwds) self.sig_list = signal_list self.dev = dev self.buf = buf self.font = pygame.font.SysFont("Ubuntu", 20, True) self.cq_font = pygame.font.SysFont("Ubuntu", 16, True) self.multiplier = 1.0 self.selected = range(14) self.display_type = [0] * 14 def select_channels(self, which): """ Supply a new array of integers which indicate the signals to show. """ self.selected = which def toggle_channel(self, ndx): """ Toggle the display of channel with index ndx (0..13). """ if ndx in self.selected: # if self.display_type[ndx] == 1: # self.selected.remove(ndx) # else: # self.display_type[ndx] = 1 self.selected.remove(ndx) else: # need to re-sort the list after the append bisect.insort(self.selected, ndx) self.display_type[ndx] = 0 def update_magnification(self, update): """ Set the magnification of the displayed signal. """ self.multiplier = max(0.2, self.multiplier + update) def render_time_series(self, sig, color, frame, surf): """ Render a time series representation (given by pts) into rect. """ # draw the zero level zero_ax_y = frame.top + frame.height // 2 pygame.draw.line(surf, (70, 70, 70), (frame.left, zero_ax_y), (frame.right, zero_ax_y)) pygame.draw.line(surf, (20, 60, 20, 30), (frame.left, frame.bottom), (frame.right, frame.bottom)) # draw the signal onto the screen (remove mean in buffer) zero_lev = np.mean(sig) sig_amp = max(np.max(sig) - zero_lev, zero_lev - np.min(sig)) if sig_amp == 0: sig_amp = 1.0 # pixel_per_lsb = self.multiplier * frame.height / sig_amp / 2.0 pixel_per_lsb = self.mu
ltiplier * frame.height / (200.0 / 0.51) draw_pts_y
= zero_ax_y - (sig - zero_lev) * pixel_per_lsb draw_pts_y[draw_pts_y < frame.top] = frame.top draw_pts_y[draw_pts_y > frame.bottom] = frame.bottom draw_pts_x = np.linspace(0, frame.width, len(sig)) + frame.left pygame.draw.lines(surf, color, False, zip(draw_pts_x, draw_pts_y)) # draw a bar that corresponds to 10uV uV10_len = 10.0 / 0.51 * pixel_per_lsb if uV10_len > frame.height: uV10_len = frame.height * 3 // 4 uV10_col = (255, 0, 0) else: uV10_col = (0, 0, 0) pygame.draw.line(surf, uV10_col, (frame.right - 10, zero_ax_y - uV10_len // 2), (frame.right - 10, zero_ax_y + uV10_len // 2), 2) def render_spectrum(self, sig, color, frame, surf): """ Render a spectral representation of the signal. """ min_freq = 0.7 max_freq = 45.0 s2 = sig.copy() # special check for all zeros (no data situation) if np.all(s2 == 0.0): sp = np.zeros(shape = (s2.shape[0] // 2, )) else: tm = np.arange(len(sig), dtype = np.float64) / 128.0 angular_freqs = np.linspace(2.0 * np.pi * min_freq, 2.0 * np.pi * max_freq, 100) # pg = scipy.signal.lombscargle(tm, s2, angular_freqs) # sp = np.sqrt(4 * (pg / tm.shape[0])) s2 = s2 - np.mean(s2) sp = np.abs(np.fft.rfft(s2)) # if there are any non-finite values, replace buffer with zeros if not np.all(np.isfinite(sp)): sp[:] = 0.0 # autoscale the spectral display # sp -= np.amin(sp) sig_amp = np.amax(sp) if sig_amp == 0: sig_amp = 1.0 pixel_per_lsb = self.multiplier * frame.height / sig_amp / 2.0 draw_pts_y = frame.bottom - sp * pixel_per_lsb draw_pts_x = np.linspace(0, frame.width, len(sp)) + frame.left # draw line at bottom of frame pygame.draw.line(surf, (20, 60, 20, 30), (frame.left, frame.bottom), (frame.right, frame.bottom)) # draw the spectrum in dB pygame.draw.lines(surf, color, False, zip(draw_pts_x, draw_pts_y)) # draw spectral bands for f in [5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0]: x = (f - min_freq) / max_freq * frame.width + frame.left pygame.draw.line(surf, (0, 0, 0), (x, frame.top), (x, frame.bottom)) # fixme: draw 20dB? yardstick def render_name_and_contact_quality(self, chan_name, frame, surf): # draw a bar indicating contact quality cq = self.dev.cq[chan_name] cr, cr_str = self.dev.contact_resistance(chan_name) # map signal resistance to color if cr is None or cr > 1000: quality_color = (255, 0, 0) elif cr > 50: quality_color = (200, 100, 20) elif cr > 20: quality_color = (200, 100, 20) else: quality_color = (20, 150, 20) zero_ax_y = frame.top + frame.height // 2 surf.blit(self.font.render(chan_name, 1, (0,0,0)), (frame.right - 150, zero_ax_y - 10)) surf.blit(self.cq_font.render('%d (%s)' % (cq, cr_str), 1, quality_color), (frame.right - 150, zero_ax_y + 10)) def draw(self, surf): """ Draw the signals. Here we expect the signal buffer to be updated. 
""" frame = surf.get_rect() pygame.draw.rect(surf, (255,255,255), frame) # plot the signals Nsig = len(self.selected) if Nsig == 0: return gr_height = (frame.bottom - frame.top) // Nsig gr_width = frame.width # get a handle to the buffer self.buf.pull_packets(self.dev) buf = self.buf.buffer() # for each signal repeat for s, sndx in zip(self.selected, range(len(self.selected))): # retrieve channel name chan_name = self.sig_list[s] # compute target rectangle rect = pygame.Rect(frame.left, frame.top + gr_height * sndx, frame.width, gr_height) # render a time series representation color = (255, 0, 0) if sndx % 2 == 0 else (0, 0, 255) if self.display_type[s] == 0: self.render_time_series(buf[:,s], color, rect, surf) else: self.render_spectrum(buf[:,s], color, rect, surf) # draw the signal name self.render_name_and_contact_quality(chan_name, rect, surf)
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .tableitems import * from .tabledisplay import * from ._version import version_info
, __version__ from .handlers import load_jupyter_server_extension from .commands import parse def _jupyter_nbextension_paths(): return [{ 'section': 'notebook', 'src': 'static', 'dest': 'beakerx_tabledisplay', 'require': 'beakerx_tabledisplay/index' } ] def _jupyt
er_server_extension_paths(): return [dict(module="beakerx_tabledisplay")] def run(): try: parse() except KeyboardInterrupt: return 130 return 0
""" Exceptions: mccabehalsted: there are triple curly brackets ({{{) in jm1 that jekyll doesn't like spe: is in "other" directory in terapromise reuse: double curly brackets in reuse dump: links end like "/ABACUS2013" without closing slash """ relativePath = "defect/ck/" import os, re, datetime from types import NoneType def extractSummary(fileContents): return re.search("^#summary ([^\n]+)\n", fileContents).group(1) def extractAuthor(fileContents): results = re.search(r"\|\| Donated by (\[[^ ]* )?([^\]|]+)\]? \|\|", fileContents) if type(results.group(2)) == NoneType: return results.group(1) else: return results.group(2) def genHeader(baseName, fileContents): summary = extractSummary(fileContents) author = extractAuthor(fileContents) return """--- title: """ + baseName + """ excerpt: """ + summary + """ layout: repo author: """ + author + """ --- """ def doDeletions(fileContents): return re.sub(r"#summary [^\n]+\n#labels [^\n]+\n\n<wiki:toc max_depth=\"2\" />", "", fileContents) def changeHeaders(fileContents): return re.sub(r"\n= ([^\n]+) =\n", r"\n#\1\n", fileContents) def reformatLinks(fileContents): sub = re.sub(r"[^\[]http([^\s]+)", r"[http\1 http\1]", fileContents) return re.sub(r"\[([^ ]+) ([^\]]+)\]", r"[\2](\1)", sub) def changeURLs(fileContents, relativePath): hasHiddenParentQ = (type(re.search(r"\d$", baseName)) != NoneType) and (relativePath == "defect/mccabehalsted/") teraPromiseRelativePath = relativePath + baseName if hasHiddenParentQ: teraPromiseRelativePath = relativePath + baseName[:-1] + "/" + baseName sub = re.sub("http://promisedata.googlecode.com/svn/trunk/[^/]+/(" + baseName + "/)?", "https://terapromise.csc.ncsu.edu:8443/svn/repo/" + teraPromiseRelativePath + r"/", fileContents) return re.sub("http://code.google.com/p/promisedata/source/browse/trunk/[^/]+/(" + baseName + "/)?", "https://terapromise.csc.ncsu.edu:8443/svn/repo/" + teraPromiseRelativePath + r"/", sub) def removeExtraneousLinks(fileContents): return fileContents def reformatTables(fileContents): sub = re.sub(r"\|\| When \|\| What \|\|", r"When | What\r---- | ----", fileContents) return re.sub(r"\|\| ([^|]+) \|\| ([^|]+) \|\|", r"\1 | \2", sub) def escapeCurlyBrackets(fileContents): sub = re.sub(r"{", r"\{", fileContents) return re.sub(r"}", r"\}", sub) def extractDateString(fileContents): result = re.search(r"\n\|\| *([^ |]+ [^ |]+ [^ |]+) *\|\| Donated by[^|]+\|\|", fileContents).group(1) return result def dateAddedString(fileContents): dateString = extractDateString(fileContents) date = datetime.datetime.strptime(dateString, "%B %d, %Y").date() return date.strftime("%Y-%m-%d-") directory = "/Users/Carter/Documents/OpenSciences/opensciences.github.io/repo/" + relativePath + "_posts/" writeDirPath = "/Users/Carter/Documents/OpenSciences/opensciences.github.io/repo/" + relativePath + "_posts/" for subdir, dirs, files in os.walk(directory): for eachFileName in files: print(eachFileName) if eachFileName[-5:] != ".wiki": continue readFilePath = directory + eachFileName baseName = os.path.basename(readFilePath)[:-5] readObj = file(readFilePath, "r") fileContents = readObj.read() readObj.close() newFileName = dateAddedString(fileContents) + os.path.basename(readFilePath)[:-5] + ".md" newFilePath = directory + newFileName header = genHeader(bas
eName, fileContents) fileContents = doDeletions(fileContents) fileContents = changeHeaders(fileContents) fileContents = reformatLinks(fileContents) fileContents = changeURLs(fileContents, relativePath) fileContents = removeExtr
aneousLinks(fileContents) fileContents = reformatTables(fileContents) fileContents = escapeCurlyBrackets(fileContents) writeObj = file(newFilePath, "w") writeObj.write(header + fileContents) writeObj.close()
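# Hedged illustration (added; not part of the original conversion script): the
# rewrites above are plain regex passes. The wiki fragment below is made up.
def _demo_rewrites():
    sample = "|| When || What ||\n|| March 1, 2010 || [http://example.org Example] ||"
    sample = reformatTables(sample)
    sample = reformatLinks(sample)
    # -> "When | What\r---- | ----\nMarch 1, 2010 | [Example](http://example.org)"
    print(sample)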
from apetools.baseclass import BaseClass from apetools.tools import copyfiles from apetools.log_setter import LOGNAME from apetools.proletarians import teardown class TearDownBuilder(BaseClass): """ A basic tear-down builder that just copies log and config files. """ def __init__(self, configfilename, storage, subdir="logs"): """ :param: - `configfilename`: the name of the config file to copy - `storage`: A storage object aimed at the data folder. """ super(TearDownBuilder, self).__init__() self.configfilename = configfilename self.storage = storage self.subdir = subdir self._configcopier = None self._logcopier = None self._teardown = None return @property def configcopier(self): """ :return: A file copier aimed at the config file """ if self._configcopier is None: self._configcopier = copyfiles.CopyFiles((self.configfilename,), self.storage, self.subdir) return self._configcopier @property def logcopier(self): """ :return: A file copier aimed at the log file """
if self._logcopier is None: self._logcopier = copyfiles.CopyFiles((LOGNAME,),
self.storage, self.subdir) return self._logcopier @property def teardown(self): """ :return: A teardown object for the test-operator to run to cleanup """ if self._teardown is None: self._teardown = teardown.TeardownSession((self.configcopier, self.logcopier)) return self._teardown # End class TearDownBuilder
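# Hedged illustration (added): each property above builds its helper on first
# access and caches it on the instance. The same guard idiom in isolation, with
# a stand-in class and payload:
class _LazyExample(object):
    def __init__(self):
        self._expensive = None

    @property
    def expensive(self):
        if self._expensive is None:
            self._expensive = object()  # constructed once, reused afterwards
        return self._expensive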
# This file is part of Indico. # Copyright (C) 2002 - 2016 European Organization for Nuclear Re
search (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public Lic
ense for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. """ Just for backwards-compatibility """ from indico.util.contextManager import *
import unittest from libs.funcs import * class TestFuncs(unittest.TestCase): def test_buildPaths(self): recPaths, repPaths, rouPaths, corePaths = buildPaths() findTxt = lambda x, y: x.find(y) > -1 assert findTxt(recPaths["Task"][0], "base") assert findTxt(recPaths["Department"][0], "StdPy") assert findTxt(recPaths["Department"][1], "standard") assert findTxt(repPaths["ListWindowReport"][0], "base") assert findTxt(repPaths["ExpensesList"][0], "StdPy") assert findTxt(repPaths["ExpensesList"][1], "standard") assert findTxt(rouPaths["GenNLT"][0], "StdPy") assert findTxt(rouPaths["GenNLT"][1], "standard") assert findTxt(corePaths["Field"][0], "embedded") self.assertFalse([k for (k, v) in rouPaths.iteritems() if findTxt(v[0], "base")]) #no routines in base def test_recordInheritance(self): recf, recd = getRecordInheritance("Invoice") assert all([f1 in recf for f1 in ("Sales
Man", "InvoiceDate", "CustCode", "Currency", "ShiftDate", "OriginNr", "SerNr", "attachFlag")]) assert all(
[d in recd for d in ("CompoundItemCosts", "Payments", "Items", "Taxes", "Installs")]) recf, recd = getRecordInheritance("AccessGroup") assert all([f2 in recf for f2 in ("PurchaseItemsAccessType", "InitialModule", "Closed", "internalId")]) assert all([d in recd for d in ("PurchaseItems", "Customs", "Modules")]) def test_recordsInfo(self): recf, recd = getRecordsInfo("Department", RECORD) assert recf["Department"]["AutoCashCancel"] == "integer" #From StdPy assert recf["Department"]["DeptName"] == "string" #From standard assert recf["Department"]["Closed"] == "Boolean" #From Master assert recf["Department"]["internalId"] == "internalid" #From Record assert recd["Department"]["OfficePayModes"] == "DepartmentOfficePayModeRow" #Recordname from detail repf, repd = getRecordsInfo("Balance", REPORT) assert repf["Balance"]["LabelType"] == "string" #StdPy assert repf["Balance"]["ExplodeByLabel"] == "boolean" #Standard assert repf["Balance"]["internalId"] == "internalid" #Record assert not repd["Balance"] #Empty dict, no detail rouf, roud = getRecordsInfo("GenNLT", ROUTINE) assert rouf["GenNLT"]["ExcludeInvalid"] == "boolean" assert rouf["GenNLT"]["Table"] == "string" assert not roud["GenNLT"] rouf, roud = getRecordsInfo("LoginDialog", RECORD) assert rouf["LoginDialog"]["Password"] == "string" #embedded assert not roud["LoginDialog"] def test_classInfo(self): attr, meth = getClassInfo("Invoice") assert attr["DEBITNOTE"] == 2 assert attr["ATTACH_NOTE"] == 3 assert attr["rowNr"] == 0 assert attr["ParentInvoice"] == "SuperClass" assert isinstance(attr["DocTypes"], list) assert isinstance(attr["Origin"], dict) assert all([m in meth for m in ("getCardReader", "logTransactionAction", "updateCredLimit", "generateTaxes", "roundValue", "getOriginType", "bring", "getXML", "createField")]) assert meth["fieldIsEditable"][0] == "self" assert meth["fieldIsEditable"][1] == "fieldname" assert meth["fieldIsEditable"][2] == {"rowfieldname":'None'} assert meth["fieldIsEditable"][3] == {"rownr":'None'} attr, meth = getClassInfo("User") assert attr["buffer"] == "RecordBuffer" assert all([m in meth for m in ("store", "save", "load", "hasField")]) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(TestFuncs)) return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' ponysay - Ponysay, cowsay reimplementation for ponies Copyright (C) 2012, 2013, 2014 Erkin Batu Altunbaş et al. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. If you intend to redistribute ponysay or a fork of it commercially, it contains aggregated images, some of which may not be commercially redistribute, you would be required to remove those. To determine whether or not you may commercially redistribute an image make use that line ‘FREE: yes’, is included inside the image between two ‘$$$’ lines and the ‘FREE’ is and upper case and directly followed by the colon. ''' from common import * from ucs import * class Balloon(): ''' Balloon format class ''' def __init__(self, link, linkmirror, linkcross, ww, ee, nw, nnw, n, nne, ne, nee, e, see, se, sse, s, ssw, sw, sww, w, nww): ''' Constructor @param link:str The \-directional balloon line character @param linkmirror:str The /-directional balloon line character @param linkcross:str The /-directional balloon crossing a \-directional ballonon line character @param ww:str See the info manual @param ee:str See the info manual @param nw:list<str> See the info manual @param nnw:list<str> See the info manual @
param n:list<str> See the info manual @param nne:list<str> See the info manual @param ne:list<str> See the info manual @param nee:str See the info manual @param e:str
See the info manual @param see:str See the info manual @param se:list<str> See the info manual @param sse:list<str> See the info manual @param s:list<str> See the info manual @param ssw:list<str> See the info manual @param sw:list<str> See the info manual @param sww:str See the info manual @param w:str See the info manual @param nww:str See the info manual ''' (self.link, self.linkmirror, self.linkcross) = (link, linkmirror, linkcross) (self.ww, self.ee) = (ww, ee) (self.nw, self.ne, self.se, self.sw) = (nw, ne, se, sw) (self.nnw, self.n, self.nne) = (nnw, n, nne) (self.nee, self.e, self.see) = (nee, e, see) (self.sse, self.s, self.ssw) = (sse, s, ssw) (self.sww, self.w, self.nww) = (sww, w, nww) _ne = max(ne, key = UCS.dispLen) _nw = max(nw, key = UCS.dispLen) _se = max(se, key = UCS.dispLen) _sw = max(sw, key = UCS.dispLen) minE = UCS.dispLen(max([_ne, nee, e, see, _se, ee], key = UCS.dispLen)) minW = UCS.dispLen(max([_nw, nww, e, sww, _sw, ww], key = UCS.dispLen)) minN = len(max([ne, nne, n, nnw, nw], key = len)) minS = len(max([se, sse, s, ssw, sw], key = len)) self.minwidth = minE + minE self.minheight = minN + minS def get(self, minw, minh, lines, lencalc): ''' Generates a balloon with a message @param minw:int The minimum number of columns of the balloon @param minh:int The minimum number of lines of the balloon @param lines:list<str> The text lines to display @param lencalc:int(str) Function used to compute the length of a text line @return :str The balloon as a formated string ''' ## Get dimension h = self.minheight + len(lines) w = self.minwidth + lencalc(max(lines, key = lencalc)) if w < minw: w = minw if h < minh: h = minh ## Create edges if len(lines) > 1: (ws, es) = ({0 : self.nww, len(lines) - 1 : self.sww}, {0 : self.nee, len(lines) - 1 : self.see}) for j in range(1, len(lines) - 1): ws[j] = self.w es[j] = self.e else: (ws, es) = ({0 : self.ww}, {0 : self.ee}) rc = [] ## Create the upper part of the balloon for j in range(0, len(self.n)): outer = UCS.dispLen(self.nw[j]) + UCS.dispLen(self.ne[j]) inner = UCS.dispLen(self.nnw[j]) + UCS.dispLen(self.nne[j]) if outer + inner <= w: rc.append(self.nw[j] + self.nnw[j] + self.n[j] * (w - outer - inner) + self.nne[j] + self.ne[j]) else: rc.append(self.nw[j] + self.n[j] * (w - outer) + self.ne[j]) ## Encapsulate the message instead left and right edges of balloon for j in range(0, len(lines)): rc.append(ws[j] + lines[j] + ' ' * (w - lencalc(lines[j]) - UCS.dispLen(self.w) - UCS.dispLen(self.e)) + es[j]) ## Create the lower part of the balloon for j in range(0, len(self.s)): outer = UCS.dispLen(self.sw[j]) + UCS.dispLen(self.se[j]) inner = UCS.dispLen(self.ssw[j]) + UCS.dispLen(self.sse[j]) if outer + inner <= w: rc.append(self.sw[j] + self.ssw[j] + self.s[j] * (w - outer - inner) + self.sse[j] + self.se[j]) else: rc.append(self.sw[j] + self.s[j] * (w - outer) + self.se[j]) return '\n'.join(rc) @staticmethod def fromFile(balloonfile, isthink): ''' Creates the balloon style object @param balloonfile:str The file with the balloon style, may be `None` @param isthink:bool Whether the ponythink command is used @return :Balloon Instance describing the balloon's style ''' ## Use default balloon if none is specified if balloonfile is None: if isthink: return Balloon('o', 'o', 'o', '( ', ' )', [' _'], ['_'], ['_'], ['_'], ['_ '], ' )', ' )', ' )', ['- '], ['-'], ['-'], ['-'], [' -'], '( ', '( ', '( ') return Balloon('\\', '/', 'X', '< ', ' >', [' _'], ['_'], ['_'], ['_'], ['_ '], ' \\', ' |', ' /', ['- '], ['-'], ['-'], ['-'], [' -'], '\\ ', 
'| ', '/ ') ## Initialise map for balloon parts map = {} for elem in ('\\', '/', 'X', 'ww', 'ee', 'nw', 'nnw', 'n', 'nne', 'ne', 'nee', 'e', 'see', 'se', 'sse', 's', 'ssw', 'sw', 'sww', 'w', 'nww'): map[elem] = [] ## Read all lines in the balloon file with open(balloonfile, 'rb') as balloonstream: data = balloonstream.read().decode('utf8', 'replace') data = [line.replace('\n', '') for line in data.split('\n')] ## Parse the balloon file, and fill the map last = None for line in data: if len(line) > 0: if line[0] == ':': map[last].append(line[1:]) else: last = line[:line.index(':')] value = line[len(last) + 1:] map[last].append(value) ## Return the balloon return Balloon(map['\\'][0], map['/'][0], map['X'][0], map['ww'][0], map['ee'][0], map['nw'], map['nnw'], map['n'], map['nne'], map['ne'], map['nee'][0], map['e'][0], map['see'][0], map['se'], map['sse'], map['s'], map['ssw'], map['sw'], map['sww'][0], map['w'][0], map['nww'][0])
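# Hedged usage sketch (added; not part of the original file): render the default
# (non-think) balloon around a short message, using plain len() as the width
# function. Assumes ponysay's `common` and `ucs` modules are importable as above.
def _example_default_balloon():
    balloon = Balloon.fromFile(None, False)
    print(balloon.get(0, 0, ['Hello', 'world'], len))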
") oa01 = Column( String(10), comment="2001 Census Output Area (OA). (There are " "about 222,000, so ~300 population?)") casward = Column( String(6), comment="Census Area Statistics (CAS) ward [PROBABLY FK to " "cas_ward_2003.cas_ward_code]") park = Column( String(CODE_LEN), comment="National park [FK to " "park_national_park_2016.park_code]") lsoa01 = Column( String(CODE_LEN), comment="2001 Census Lower Layer Super Output Area (LSOA) [England & " "Wales, ~1,500 population] / Data Zone (DZ) [Scotland] / " "Super Output Area (SOA) [FK to one of: " "lsoa_lower_layer_super_output_area_england_wales_2004.lsoa_code; " # noqa "lsoa_lower_layer_super_output_area_n_ireland_2005.lsoa_code]") msoa01 = Column( String(CODE_LEN), comment="2001 Census Middle Layer Super Output Area (MSOA) [England & " "Wales, ~7,200 population] / " "Intermediate Zone (IZ) [Scotland] [FK to one of: " "msoa_middle_layer_super_output_area_england_wales_2004.msoa_code; " # noqa "iz_intermediate_zone_scotland_2005.iz_code]") ur01ind = Column( String(1),
comment="2001 Census urban/rural indicator [numeric in " "England/Wales/Scotland; letters in N. Ireland]") oac01 = Column( String(3), comment="2001 Census Output Area classification (OAC)" "[POSSIBLY FK to output_area_classification_2011." "subgroup_code]") oa
11 = Column( String(CODE_LEN), comment="2011 Census Output Area (OA) [England, Wales, Scotland;" " ~100-625 population] / Small Area (SA) [N. Ireland]") lsoa11 = Column( String(CODE_LEN), comment="2011 Census Lower Layer Super Output Area (LSOA) [England & " "Wales, ~1,500 population] / Data Zone (DZ) [Scotland] / " "Super Output Area (SOA) [N. Ireland] [FK to one of: " "lsoa_lower_layer_super_output_area_2011.lsoa_code; " # noqa " (defunct) dz_datazone_scotland_2011.dz_code]") msoa11 = Column( String(CODE_LEN), comment="2011 Census Middle Layer Super Output Area (MSOA) [England & " "Wales, ~7,200 population] / " "Intermediate Zone (IZ) [Scotland] [FK to one of: " "msoa_middle_layer_super_output_area_2011.msoa_code; " # noqa "iz_intermediate_zone_scotland_2011.iz_code]") parish = Column( String(CODE_LEN), comment="Parish/community [FK to " "parish_ncp_england_wales_2018.parish_code]") wz11 = Column( String(CODE_LEN), comment="2011 Census Workplace Zone (WZ)") ccg = Column( String(CODE_LEN), comment="Clinical Commissioning Group (CCG) / Local Health Board " "(LHB) / Community Health Partnership (CHP) / Local " "Commissioning Group (LCG) / Primary Healthcare Directorate " "(PHD) [FK to one of: " "ccg_clinical_commissioning_group_uk_2019." "ccg_ons_code, lhb_local_health_board_wales_2014.lhb_code]") bua11 = Column( String(CODE_LEN), comment="Built-up Area (BUA) [FK to " "bua_built_up_area_uk_2013.bua_code]") buasd11 = Column( String(CODE_LEN), comment="Built-up Area Sub-division (BUASD) [FK to " "buasd_built_up_area_subdivision_uk_2013.buas_code]") ru11ind = Column( String(2), comment="2011 Census rural-urban classification") oac11 = Column( String(3), comment="2011 Census Output Area classification (OAC) [FK to " "output_area_classification_2011.subgroup_code]") lat = Column( Numeric(precision=9, scale=6), comment="Latitude (degrees, 6dp)") long = Column( Numeric(precision=9, scale=6), comment="Longitude (degrees, 6dp)") lep1 = Column( String(CODE_LEN), comment="Local Enterprise Partnership (LEP) - first instance [FK to " "lep_local_enterprise_partnership_england_2017.lep1_code]") lep2 = Column( String(CODE_LEN), comment="Local Enterprise Partnership (LEP) - second instance [FK to " "lep_local_enterprise_partnership_england_2017.lep1_code]") pfa = Column( String(CODE_LEN), comment="Police Force Area (PFA) [FK to " "pfa_police_force_area_2015.pfa_code]") imd = Column( Integer, comment="Index of Multiple Deprivation (IMD) [rank of LSOA/DZ, where " "1 is the most deprived, within each country] [FK to one of: " "imd_index_multiple_deprivation_england_2015.imd_rank; " "imd_index_multiple_deprivation_n_ireland_2010.imd_rank; " "imd_index_multiple_deprivation_scotland_2012.imd_rank; " "imd_index_multiple_deprivation_wales_2014.imd_rank]") # New in Nov 2019 ONSPD, relative to 2016 ONSPD: # ** Not yet implemented: # calncv # ced # nhser # rgn # stp def __init__(self, **kwargs: Any) -> None: convert_date(kwargs, 'dointr') convert_date(kwargs, 'doterm') convert_int(kwargs, 'usertype') convert_int(kwargs, 'oseast1m') convert_int(kwargs, 'osnrth1m') convert_int(kwargs, 'osgrdind') convert_int(kwargs, 'streg') convert_int(kwargs, 'edind') convert_int(kwargs, 'imd') kwargs['pcd_nospace'] = kwargs['pcd'].replace(" ", "") super().__init__(**kwargs) # ============================================================================= # Models: core lookup tables # ============================================================================= class OAClassification(Base): """ Represents 2011 Census Output Area (OA) 
classification names/codes. """ __filename__ = "2011 Census Output Area Classification Names and Codes " \ "UK.xlsx" __tablename__ = "output_area_classification_2011" oac11 = Column(String(3), primary_key=True) supergroup_code = Column(String(1)) supergroup_desc = Column(String(35)) group_code = Column(String(2)) group_desc = Column(String(40)) subgroup_code = Column(String(3)) subgroup_desc = Column(String(60)) def __init__(self, **kwargs: Any) -> None: rename_key(kwargs, 'OAC11', 'oac11') rename_key(kwargs, 'Supergroup', 'supergroup_desc') rename_key(kwargs, 'Group', 'group_desc') rename_key(kwargs, 'Subgroup', 'subgroup_desc') kwargs['supergroup_code'] = kwargs['oac11'][0:1] kwargs['group_code'] = kwargs['oac11'][0:2] kwargs['subgroup_code'] = kwargs['oac11'] super().__init__(**kwargs) class BUA(Base): """ Represents England & Wales 2013 build-up area (BUA) codes/names. """ __filename__ = "BUA_names and codes UK as at 12_13.xlsx" __tablename__ = "bua_built_up_area_uk_2013" bua_code = Column(String(CODE_LEN), primary_key=True) bua_name = Column(String(NAME_LEN)) def __init__(self, **kwargs: Any) -> None: rename_key(kwargs, 'BUA13CD', 'bua_code') rename_key(kwargs, 'BUA13NM', 'bua_name') super().__init__(**kwargs) class BUASD(Base): """ Represents built-up area subdivisions (BUASD) in England & Wales 2013. """ __filename__ = "BUASD_names and codes UK as at 12_13.xlsx" __tablename__ = "buasd_built_up_area_subdivision_uk_2013" buasd_code = Column(String(CODE_LEN), primary_key=True) buasd_name = Column(String(NAME_LEN)) def __init__(self, **kwargs: Any) -> None: rename_key(kwargs, 'BUASD13CD', 'buasd_code') rename_key(kwargs, 'BUASD13NM', 'buasd_name') super().__init__(**kwargs) class CASWard(Base): """ Represents censua area statistics (CAS) wards in the UK, 2003. - https://www.ons.gov.uk/methodology/g
from .visitor import Visitor
from .metavisitor import MetaVisitor
from .experiments import ExperimentsVisitor
from .usedby import UsedByVisitor
from .testedscenarios import TestedScenariosVisitor
from .invalidentities import InvalidEntitiesVisitor
# from presenter.gesurvey import GESurveyPresenter
opA = DummyOperator(task_id='A') opB = DummyOperator(task_id='B') opC = DummyOperator(task_id='C') opD = DummyOperator(task_id='D') opE = DummyOperator(task_id='E') opF = DummyOperator(task_id='F') opA.set_downstream(opB) opB.set_downstream(opC) opB.set_downstream(opD) opE.set_downstream(opF) self.assertFalse(dag.test_cycle()) # test self loop dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> A with dag: opA = DummyOperator(task_id='A') opA.set_downstream(opA) with self.assertRaises(AirflowDagCycleException): dag.test_cycle() # test downstream self loop dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B -> C -> D -> E -> E with dag: opA = DummyOperator(task_id='A') opB = DummyOperator(task_id='B') opC = DummyOperator(task_id='C') opD = DummyOperator(task_id='D') opE = DummyOperator(task_id='E') opA.set_downstream(opB) opB.set_downstream(opC) opC.set_downstream(opD) opD.set_downstream(opE) opE.set_downstream(opE) with self.assertRaises(AirflowDagCycleException): dag.test_cycle() # large loop dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # A -> B -> C -> D -> E -> A with dag: opA = DummyOperator(task_id='A') opB = DummyOperator(task_id='B') opC = DummyOperator(task_id='C') opD = DummyOperator(task_id='D') opE = DummyOperator(task_id='E') opA.set_downstream(opB) opB.set_downstream(opC) opC.set_downstream(opD) opD.set_downstream(opE) opE.set_downstream(opA) with self.assertRaises(AirflowDagCycleException): dag.test_cycle() # test arbitrary loop dag = DAG( 'dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'}) # E-> A -> B -> F -> A # -> C -> F with dag: opA = DummyOperator(task_id='A') opB = DummyOperator(task_id='B') opC = DummyOperator(task_id='C') opD = DummyOperator(task_id='D') opE = DummyOperator(task_id='E') opF = DummyOperator(task_id='F') opA.set_downstream(opB) opA.set_downstream(opC) opE.set_downstream(opA) opC.set_downstream(opF) opB.set_downstream(opF) opF.set_downstream(opA) with self.assertRaises(AirflowDagCycleException): dag.test_cycle() def test_following_previous_schedule(self): """ Make sure DST transitions are properly observed """ local_tz = pendulum.timezone('Europe/Zurich') start = local_tz.convert(datetime.datetime(2018, 10, 28, 2, 55), dst_rule=pendulum.PRE_TRANSITION) self.assertEqual(start.isoformat(), "2018-10-28T02:55:00+02:00", "Pre-condition: start date is in DST") utc = timezone.convert_to_utc(start) dag = DAG('tz_dag', start_date=start, schedule_interval='*/5 * * * *') _next = dag.following_schedule(utc) next_local = local_tz.convert(_next) self.assertEqual(_next.isoformat(), "2018-10-28T01:00:00
+00:00") self.assertEqual(next_local.isoformat(), "2018-10-28T02:00:00+01:00") prev = dag.previous_schedule(utc) prev_local = local_tz.convert(prev) self.assertEqual(prev_local.isoformat(), "2018-10-28T02:50:00+02:00") prev = dag.previous_schedule(_next) prev_local = local_tz.convert(prev) self.assertEqual(prev_local.isoformat(), "2018-10-28T02:55:00+02:00")
self.assertEqual(prev, utc) def test_following_previous_schedule_daily_dag_CEST_to_CET(self): """ Make sure DST transitions are properly observed """ local_tz = pendulum.timezone('Europe/Zurich') start = local_tz.convert(datetime.datetime(2018, 10, 27, 3), dst_rule=pendulum.PRE_TRANSITION) utc = timezone.convert_to_utc(start) dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *') prev = dag.previous_schedule(utc) prev_local = local_tz.convert(prev) self.assertEqual(prev_local.isoformat(), "2018-10-26T03:00:00+02:00") self.assertEqual(prev.isoformat(), "2018-10-26T01:00:00+00:00") _next = dag.following_schedule(utc) next_local = local_tz.convert(_next) self.assertEqual(next_local.isoformat(), "2018-10-28T03:00:00+01:00") self.assertEqual(_next.isoformat(), "2018-10-28T02:00:00+00:00") prev = dag.previous_schedule(_next) prev_local = local_tz.convert(prev) self.assertEqual(prev_local.isoformat(), "2018-10-27T03:00:00+02:00") self.assertEqual(prev.isoformat(), "2018-10-27T01:00:00+00:00") def test_following_previous_schedule_daily_dag_CET_to_CEST(self): """ Make sure DST transitions are properly observed """ local_tz = pendulum.timezone('Europe/Zurich') start = local_tz.convert(datetime.datetime(2018, 3, 25, 2), dst_rule=pendulum.PRE_TRANSITION) utc = timezone.convert_to_utc(start) dag = DAG('tz_dag', start_date=start, schedule_interval='0 3 * * *') prev = dag.previous_schedule(utc) prev_local = local_tz.convert(prev) self.assertEqual(prev_local.isoformat(), "2018-03-24T03:00:00+01:00") self.assertEqual(prev.isoformat(), "2018-03-24T02:00:00+00:00") _next = dag.following_schedule(utc) next_local = local_tz.convert(_next) self.assertEqual(next_local.isoformat(), "2018-03-25T03:00:00+02:00") self.assertEqual(_next.isoformat(), "2018-03-25T01:00:00+00:00") prev = dag.previous_schedule(_next) prev_local = local_tz.convert(prev) self.assertEqual(prev_local.isoformat(), "2018-03-24T03:00:00+01:00") self.assertEqual(prev.isoformat(), "2018-03-24T02:00:00+00:00") @patch('airflow.models.timezone.utcnow') def test_sync_to_db(self, mock_now): dag = DAG( 'dag', start_date=DEFAULT_DATE, ) with dag: DummyOperator(task_id='task', owner='owner1') SubDagOperator( task_id='subtask', owner='owner2', subdag=DAG( 'dag.subtask', start_date=DEFAULT_DATE, ) ) now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC')) mock_now.return_value = now session = settings.Session() dag.sync_to_db(session=session) orm_dag = session.query(DagModel).filter(DagModel.dag_id == 'dag').one() self.assertEqual(set(orm_dag.owners.split(', ')), {'owner1', 'owner2'}) self.assertEqual(orm_dag.last_scheduler_run, now) self.assertTrue(orm_dag.is_active) orm_subdag = session.query(DagModel).filter( DagModel.dag_id == 'dag.subtask').one() self.assertEqual(set(orm_subdag.owners.split(', ')), {'owner1', 'owner2'}) self.assertEqual(orm_subdag.last_scheduler_run, now) self.assertTrue(orm_subdag.is_active) class DagStatTest(unittest.TestCase): def test_dagstats_crud(self): DagStat.create(dag_id='test_dagstats_crud') session = settings.Session() qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud') self.assertEqual(len(qry.all()), len(State.dag_states)) DagStat.set_dirty(dag_id='test_dagstats_crud') r
# -*- coding: utf-8 -*- import os import time from StringIO import StringIO from PIL import Image from django.conf import settings from easy_thumbnails.base import Thumbnail from easy_thumbnails.main import DjangoThumbnail, get_thumbnail_setting from easy_thumbnails.processors import dynamic_import, get_valid_options from easy_thumbnails.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME,\ THUMB_NAME, PIC_SIZE class ThumbnailTest(BaseTest): def testThumbnails(self): # Thumbnail thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 1, requested_size=(240, 240)) self.verify_thumbnail((240, 180), thumb) # Cropped thumbnail thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 2, requested_size=(240, 240), opts=['crop']) self.verify_thumbnail((240, 240), thumb) # Thumbnail with altered JPEG quality thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 3, requested_size=(240, 240), quality=95) self.verify_thumbnail((240, 180), thumb) def testRegeneration(self): # Create thumbnail thumb_name = THUMB_NAME % 4 thumb_size = (240, 240) Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size) self.images_to_delete.add(thumb_name) thumb_mtime = os.path.getmtime(thumb_name) time.sleep(1) # Create another instance, shouldn't generate a new thumb Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size) self.assertEqual(os.path.getmtime(thumb_name), thumb_mtime) # Recreate the source image, then see if a new thumb is generated Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG') Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size) self.assertNotEqual(os.path.getmtime(thumb_name), thumb_mtime) def testFilelikeDest(self): # Thumbnail filelike_dest = StringIO() thumb = Thumbnail(source=PIC_NAME, dest=filelike_dest, requested_size=(240, 240)) self.verify_thumbnail((240, 180), thumb) def testRGBA(self): # RGBA image rgba_pic_name = os.path.join(settings.MEDIA_ROOT, 'easy-thumbnails-test_rgba_source.png') Image.new('RGBA', PIC_SIZE).save(rgba_pic_name) self.images_to_delete.add(rgba_pic_name) # Create thumb and verify it's still RGBA rgba_thumb_name = os.path.join(settings.MEDIA_ROOT, 'easy-thumbnails-test_rgba_dest.png') thumb = Thumbnail(source=rgba_pic_name, dest=rgba_thumb_name, requested_size=(240, 240)) self.verify_thumbnail((240, 180), thumb, expected_mode='RGBA') class DjangoThumbnailTest(BaseTest): def setUp(self): super(DjangoThumbnailTest, self).setUp() # Add another source image in a sub-directory for testing subdir and # basedir. 
self.sub_dir = os.path.join(settings.MEDIA_ROOT, 'test_thumbnail') try: os.mkdir(self.sub_dir) except OSError: pass self.pic_subdir = os.path.join(self.sub_dir, RELATIVE_PIC_NAME) Image.new('RGB', PIC_SIZE).save(self.pic_subdir, 'JPEG') self.images_to_delete.add(self.pic_subdir) def testFilenameGeneration(self): basename = RELATIVE_PIC_NAME.replace('.', '_') # Basic filename thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120)) expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Changed quality and cropped thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120), opts=['crop'], quality=95) expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_crop_q95.jpg' self.verify_thumbnail((240, 120), thumb, expected_filename=expected) # All options on processors = dynamic_import(get_thumbnail_setting('PROCESSORS')) valid_options = get_valid_options(processors) thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120), opts=valid_options) expected = (os.path.join(settings.MEDIA_ROOT, basename) + '_240x120_' 'autocrop_bw_crop_detail_max_sharpen_upscale_q85.jpg') self.verify_thumbnail((240, 120), thumb, expected_filename=expected) # Different basedir basedir = 'easy-thumbnails-test-basedir' self.change_settings.change({'BASEDIR': basedir}) thumb = DjangoThumbnail(relative_source=self.pic_subdir, requested_size=(240, 120)) expected = os.path.join(basedir, self.sub_dir, basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Different subdir self.change_settings.change({'BASEDIR': '', 'SUBDIR': 'subdir'}) thumb = DjangoThumbnail(relative_source=self.pic_subdir, requested_size=(240, 120)) expected = os.path.join(settings.MEDIA_ROOT, os.path.basename(self.sub_dir), 'subdir', basename) expected += '_240x120_q85.jpg' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Different prefix self.change_settings.change({'SUBDIR': '', 'PREFIX': 'prefix-'}) thumb = DjangoThumbnail(relative_source=self.pic_subdir, requested_size=(240, 120)) expected = os.path.join(self.sub_dir, 'prefix-' + basename) expected += '_240x120_q85.jpg' self.verify_thumbn
ail((160, 120), thumb, expected_filename=expected) def t
estAlternateExtension(self): basename = RELATIVE_PIC_NAME.replace('.', '_') # Control JPG thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120)) expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_q85.jpg' expected_jpg = expected self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Test PNG thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME, requested_size=(240, 120), extension='png') expected = os.path.join(settings.MEDIA_ROOT, basename) expected += '_240x120_q85.png' self.verify_thumbnail((160, 120), thumb, expected_filename=expected) # Compare the file size to make sure it's not just saving as a JPG with # a different extension. self.assertNotEqual(os.path.getsize(expected_jpg), os.path.getsize(expected)) def testUnicodeName(self): unicode_name = 'easy-thumbnails-ążśź_source.jpg' unicode_path = os.path.join(settings.MEDIA_ROOT, unicode_name) Image.new('RGB', PIC_SIZE).save(unicode_path) self.images_to_delete.add(unicode_path) thumb = DjangoThumbnail(relative_source=unicode_name, requested_size=(240, 120)) base_name = unicode_name.replace('.', '_') expected = os.path.join(settings.MEDIA_ROOT, base_name + '_240x120_q85.jpg') self.verify_thumbnail((160, 120), thumb, expected_filename=expected) def tearDown(self): super(DjangoThumbnailTest, self).tearDown() subdir = os.path.join(self.sub_dir, 'subdir') if os.path.exists(subdir): os.rmdir(subdir) os.rmdir(self.sub_dir)
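# Hedged helper (added; it mirrors the naming convention exercised by the tests
# above and is not the library's own implementation): thumbnails are named
# basename + _WxH + sorted options + _qQUALITY + extension.
def _expected_thumb_name(basename, size, opts=(), quality=85, ext='jpg'):
    parts = [basename, '%dx%d' % size] + sorted(opts) + ['q%d' % quality]
    return '_'.join(parts) + '.' + ext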
.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}) }, u'catalog.likeshop': { 'Meta': {'object_name': 'LikeShop'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}) }, u'catalog.liketutorial': { 'Meta': {'object_name': 'LikeTutorial'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}) }, u'catalog.list': { 'Meta': {'object_name': 'List'}, 'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"}) }, u'catalog.listgroup': { 'Meta': {'object_name': 'ListGroup'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'catalog.listitem': { 'Meta': {'object_name': 'ListItem'}, 'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}) }, u'catalog.location': { 'Meta': {'object_name': 'Location'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'catalog.logidenticalproduct': { 'Meta': {'object_name': 'LogIdenticalProduct'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': u"orm['catalog.Product']"}), 'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': u"orm['catalog.Product']"}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'user': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.Faceb
ookCustomUser']"}) }, u'catalog.makey': { 'Meta': {'object_name': 'Makey'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}), 'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', '
related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}), 'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'}) }, u'catalog.note': { 'Meta': {'object_name': 'Note'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}) }, u'catalog.product': { 'Meta': {'object_name': 'Product'}, 'added_time': ('django.db.models.fields.DateTimeField', [], {}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']", 'null': 'True', 'blank': 'True'}), 'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'sku': ('django.db.models.fields.IntegerField', [], {}), 'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'}) }, u'catalog.productdescription': { 'Meta': {'object_name': 'Pr
from functions import *
from utils import *
#!/usr/bin/env python """A
llows functions from coot_utils to be imported""" # Copyright 2011, 2012 Kevin Keating # # Licensed under the Educational Community License, Version 2.0 (the # "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.osedu.org/licenses/ECL-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "A
S IS" # BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. #"import coot_utils" results in an error, so this module is required to retrieve #functions that are defined in coot_utils import os, sys from os.path import exists, join from coot import * use_gui_qm = False #coot_utils requires this variable to be defined #search the Python path for coot_utils for curpath in sys.path: abspath = join(curpath, "coot_utils.py") if exists(abspath): #when we find it, exec it #but first exec redefine_functions.py if it's in the same directory #redefine_functions.py renames func_py() to func(), which used to be done in coot_utils.py itself #new versions of coot_utils.py requires this renaming to be done before being exec'ed redefAbspath = join(curpath, "redefine_functions.py") if exists(redefAbspath): execfile(redefAbspath) execfile(abspath) break
""" @brief test log(time=0s) """ import os import unittest from pyquick
helper.loghelper import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile from pyquickhelper.ipythonhelper import upgrade_notebook, remove_execution_number class TestConvertNotebooks(unittest.TestCase): """Converts notebooks from v3 to v4. Should not be needed anymore.""" def test_convert_notebooks(self): fLOG( __file__, self._testMethodName, OutputPrint=__name__ == "__main__") fold = os.path.abspath(os.path.dirname(__file__)) fold2 = os.path.normpath( os.path.join(fold, "..", "..", "_doc", "notebooks")) for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"): t = upgrade_notebook(nbf) if t: fLOG("modified", nbf) # remove numbers remove_execution_number(nbf, nbf) fold2 = os.path.normpath(os.path.join(fold, "..", "..", "_unittests")) for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"): t = upgrade_notebook(nbf) if t: fLOG("modified", nbf) if __name__ == "__main__": unittest.main()
weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Tests for management commands. """ from django.test import TestCase from weblate.trans.tests.test_models import RepoTestCase from weblate.trans.models import SubProject from django.core.management import call_command from django.core.management.base import CommandError import django # Django 1.5 changes behavior here if django.VERSION >= (1, 5): COMMAND_EXCEPTION = CommandError else: COMMAND_EXCEPTION = SystemExit class ImportProjectTest(RepoTestCase): def test_import(self): project = self.create_project() call_command( 'import_project', 'test', self.repo_path, 'master', '**/*.po', ) # We should have loaded four subprojects self.assertEqual(project.subproject_set.count(), 4) def test_import_po(self): project = self.create_project() call_command( 'import_project', 'test', self.repo_path, 'master', '**/*.po', file_format='po' ) # We should have loaded four subprojects self.assertEqual(project.subproject_set.count(), 4) def test_import_invalid(self): project = self.create_project() self.assertRaises( COMMAND_EXCEPTION, call_command, 'import_project', 'test', self.repo_path, 'master', '**/*.po', file_format='INVALID' ) # We should have loaded none subprojects self.assertEqual(project.subproject_set.count(), 0) def test_import_aresource(self): project = self.create_project() call_command( 'import_project', 'test', self.repo_path, 'master', '**/values-*/strings.xml', file_format='aresource', base_file_template='android/values/strings.xml', ) # We should have loaded one subproject self.assertEqual(project.subproject_set.count(), 1) def test_import_aresource_format(self): project = self.create_project() call_command( 'import_project', 'test', self.repo_path, 'master', '**/values-*/strings.xml', file_format='aresource', base_file_template='%s/values/strings.xml', ) # We should have loaded one subproject self.assertEqual(project.subproject_set.count(), 1) def test_re_import(self): project = self.create_project() call_command( 'import_project', 'test', self.repo_path, 'master', '**/*.po', ) # We should have loaded four subprojects self.assertEqual(project.subproject_set.count(), 4) call_command( 'import_project', 'test', self.repo_path, 'master', '**/*.po', ) # We should load no more subprojects self.assertEqual(project.subproject_set.count(), 4) def test_import_against_existing(self): ''' Test importing with a weblate:// URL ''' android = self.create_android() project = android.project self.assertEqual(project.subproject_set.count(), 1) call_command( 'import_project', project.slug, 'weblate://%s/%s' % (project.slug, android.slug), 'master', '**/*.po', ) # We should have loaded five subprojects self.assertEqual(project.subproject_set.count(), 5) def test_import_missing_project(self): ''' Test of correct handling of missing project. 
''' self.assertRaises( COMMAND_EXCEPTION, call_command, 'import_project', 'test', self.repo_path, 'master', '**/*.po', ) def test_import_missing_wildcard(self): ''' Test of correct handling of missing wildcard. ''' self.create_project() self.assertRaises( COMMAND_EXCEPTION, call_command, 'import_project', 'test', self.repo_path, 'master', '*/*.po', ) class BasicCommandTest(TestCase): def test_versions(self): call_command('list_versions') class PeriodicCommandTest(RepoTestCase): def setUp(self): super(PeriodicCommandTest, self).setUp() self.create_subproject() def test_cleanup(self): call_command( 'cleanuptrans' ) def test_update_index(self): # Test the command call_command( 'update_index' ) def test_list_checks(self): call_command( 'list_ignored_checks' ) call_command( 'list_ignored_checks', list_all=True ) call_command( 'list_ignored_checks', count=10 ) class CheckGitTest(RepoTestCase): ''' Base class for handling tests of WeblateCommand based commands. ''' command_name = 'checkgit' def setUp(self): super(CheckGitTest, self).setUp() self.create_subproject() def do_test(self, *args, **kwargs): call_command( self.command_name, *args, **kwargs ) def test_all(self): self.do_test( all=True, ) def test_project(self): self.do_test( 'test', ) def test_subproject(self): self.do_test( 'test/test', ) def test_nonexisting_project(self): self.assertRaises( COMMAND_EXCEPTION, self.do_test, 'notest', ) def test_nonexisting_subproject(self): self.assertRaises( COMMAND_EXCEPTION, self.do_test, 'test/notest', ) class CommitPendingTest(CheckGitTest): command_name = 'commit_pending' class CommitGitTest(CheckGitTest): command_name = 'commitgit' class PushGitTest(CheckGitTest): command_name = 'pushgit' class LoadTest(CheckGitTest): command_name = 'loadpo' class UpdateChecksTest(CheckGitTest): command_name = 'updatechecks' class UpdateGitTest(CheckGitTest): command_name = 'updategit' class RebuildIndexTest(CheckGitTest): command_name = 'rebuild_index' def test_all_clean(self): self.do_test( all=True, clean=True, ) class LockTranslationTest(CheckGitTest): command_name = 'lock_translation' class UnLockTranslationTest(CheckGitTest): command_name = 'unlock_translation' class LockingCommandTest(RepoTestCase): ''' Test locking and unlocking. ''' de
f setUp(self): super(LockingCommandTest, self).setUp() self.create_subproject() def test_locking(self): subproject = SubProject.objects.all()[0] self.assertFalse( SubProject.objects.filter(locked=True).exists() ) call_command( 'lock_translation'
, '{0}/{1}'.format( subproject.project.slug, subproject.slug, ) ) self.assertTrue( SubProject.objects.filter(locked=True).exists() ) call_command( 'unlock_translati
#/**************************************************************************** # Copyright 2015, Colorado School of Mines and others. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #****************************************************************************/ import os,sys from java.awt.image import * from java.awt import * from java.lang import * from javax.swing import * import math from jarray import * from edu.mines.jtk.awt import ColorMap from edu.mines.jtk.mosaic import * from edu.mines.jtk.dsp import LocalSmoothingFilter from edu.mines.jtk.dsp import Sampling from edu.mines.jtk.util.ArrayMath import * from edu.mines.jtk.sgl import * ############################################################################## # Perceptual Color Map Demo Using CIE L*a*b* Color Space # # Humans are terrible at differentiating colors. We can't help it - # it's biology! The human eye has four types of receptors: the rods which are # sensitive only to black, white and shades of gray, and cones of which there # are three types, each responding to a different range of colors. In fact, # those ranges have some degree of overlap, and not every wavelength range is # adequately covered. # # Because of this, there exist two main sets of colors that are always # competing for dominance and cannot be perceived together: the Red-Green # pair, and the Yellow-Blue pair. These are known as "color opponents". # # Conventional color models such as RGB and CMYK do not adequately reflect # this physiological bias. # # The CIE L*a*b* (or CIELAB) color space addresses this by describing the # colors visible to the human eye. It is a three-dimensional color space # where L* represents the lightness of a color, a* represents a color's # position between the red and green color opponents, and b* represents a # color's position between blue and yellow. # # When we convert color maps and observe the lightness (L*) we immediately see # distinct inflection points which are observed to be bands # or contours in the original color map. This can create biases when applied # to scientific visualization by unnecessarily leading our eyes or creating # false topography. # # There are two ways this demo addresses this. The first method smooths the # lightness graph thereby reducing the inflection points, which essentially # "smooths" the sharp bands of color when transitioning hues. # The second method assigns a new monotonically increasing lightness graph, # which attempts to ensure that each change in value is represented by a # change in perception. 
# # Author: Chris Engelsma # Version: 2015.09.27 ############################################################################## def main(args): pp1 = test1() pp2 = test2() # pp3 = test3() pf = PlotFrame(pp1,pp2,PlotFrame.Split.HORIZONTAL) pf.setDefaultCloseOperation(PlotFrame.EXIT_ON_CLOSE) pf.setVisible(True) return def test1(): rgb,Lab = getRgbAndLab() L = getLightnessFromLab(Lab) return plot(L,icm) def test2(): rgb,Lab = getRgbAndLab() Lab = smoothLightness(Lab) L = getLightnessFromLab(Lab) icm2 = getNewColorModel(Lab) return plot(L,icm2) def test3(): rgb,Lab = getRgbAndLab() Lab = setMonotonicallyIncreasingLightness(Lab) L
= getLightnessFromLab(Lab) icm2 = getNewColorModel(Lab) return plot(L,icm2) def plot(L,icm): pp = PlotPanel(2,1) pv = pp.addPixels(0,0,f) pv.setColorModel(icm) pv.setOrientation(PixelsView.Orientation.X1DOWN_X2RIGHT) pv.setInterpolation(PixelsView.Interpolation.LINEAR) pov = pp.addPoints(1,0,L) pov.setMarkStyle(PointsView.Mark.FILLED_CIRCLE) pov.setMarkSize(2) pov.setLineStyle(PointsView.Line.NONE) pp.setHLabel(0,"Color value") pp.setVLabel(1,"Lightness (
L*)") pp.setVLimits(1,0,100) return pp def getNewColorModel(Lab): col = zeros(len(x),Color) for i in range(len(x)): j = 3*i rgb = ColorMap.cieLabToRgb(Lab[j+0],Lab[j+1],Lab[j+2]) col[i] = Color(rgb[0],rgb[1],rgb[2]); cm = ColorMap(0,1,col) return cm.getColorModel() def getRgbAndLab(): cm = ColorMap(icm) Lab = zerofloat(n*3) rgb = zerofloat(n*3) color = zerofloat(3) for i in range(len(x)): cieLab = cm.getCieLabFloats(f[i]) color = cm.getRgbFloats(f[i]) rgb[3*i+0] = color[0] rgb[3*i+1] = color[1] rgb[3*i+2] = color[2] Lab[3*i+0] = cieLab[0] Lab[3*i+1] = cieLab[1] Lab[3*i+2] = cieLab[2] return rgb,Lab def getLightnessFromLab(Lab): L = zerofloat(len(Lab)/3) for i in range(len(L)): L[i] = Lab[3*i] return L def setUniformLightness(Lab,v): for i in range(len(Lab)/3): Lab[3*i] = v return Lab def setMonotonicallyIncreasingLightness(Lab): for i in range(len(Lab)/3): Lab[3*i] = i * (50.0/256.0) + 25 return Lab def smoothLightness(Lab): w = 10; n = len(Lab)/3 for k in range(5): for i in range(n): lw = max(0,i-w) rw = min(n,i+w) val = 0.0 for j in range(lw,rw): val += Lab[3*j] val /= rw-lw Lab[3*i] = val return Lab n = 256; d1 = .0039; f1 = 0.0; x = rampfloat(f1,d1,n) f = zerofloat(1,n) for i in range(n): f[i][0] = x[i] s1 = Sampling(n,d1,f1) icm = ColorMap.HUE ############################################################################## class RunMain(Runnable): def run(self): main(sys.argv) SwingUtilities.invokeLater(RunMain())
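# ----------------------------------------------------------------------------
# A minimal sketch (plain Python, separate from the Jython demo above) of the
# CIE L* lightness computation that the demo reads back via
# ColorMap.getCieLabFloats. It assumes a D65 white point and standard sRGB
# companding; the function name srgb_lightness is illustrative, not part of
# the demo.

def srgb_lightness(r, g, b):
    """Return CIE L* in [0, 100] for sRGB components given in [0, 1]."""
    def linearize(c):
        # undo sRGB gamma companding
        return c / 12.92 if c <= 0.04045 else ((c + 0.055) / 1.055) ** 2.4
    # relative luminance Y under D65, then the CIELAB transfer function f(Y/Yn)
    y = 0.2126 * linearize(r) + 0.7152 * linearize(g) + 0.0722 * linearize(b)
    f = y ** (1.0 / 3.0) if y > (6.0 / 29.0) ** 3 else y / (3.0 * (6.0 / 29.0) ** 2) + 4.0 / 29.0
    return 116.0 * f - 16.0

# Pure hues differ strongly in perceived lightness, which is what produces the
# "bands" the demo smooths out, e.g.:
#   srgb_lightness(1.0, 0.0, 0.0)  ~ 53   (red)
#   srgb_lightness(0.0, 1.0, 0.0)  ~ 88   (green)
#   srgb_lightness(0.0, 0.0, 1.0)  ~ 32   (blue)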
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer o
f the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This li
brary is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCTWDistributionAnalysis from .mbcssm import EUCTW_SM_MODEL class EUCTWProber(MultiByteCharSetProber): def __init__(self): super(EUCTWProber, self).__init__() self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) self.distribution_analyzer = EUCTWDistributionAnalysis() self.reset() @property def charset_name(self): return "EUC-TW" @property def language(self): return "Taiwan"
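# A hedged usage sketch for the prober above: within chardet it is normally
# driven by the detection pipeline, but it can also be fed bytes directly via
# the feed()/get_confidence() interface inherited from MultiByteCharSetProber.
# The helper name guess_is_euctw and the variable some_euctw_bytes are
# illustrative, not part of this module.

def guess_is_euctw(raw_bytes):
    prober = EUCTWProber()
    prober.feed(raw_bytes)   # updates the coding state machine and distribution analyser
    return prober.charset_name, prober.get_confidence()

# name, confidence = guess_is_euctw(some_euctw_bytes)   # -> ("EUC-TW", 0.0..1.0)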
import numpy as np from stcad.source_dev.chip import Base_Chip from stcad.source_dev.objects import Drum imp
ort gdsCAD as cad chipsize = 50 chip = Base_Chip('drum', chipsize, chipsize,label=False) inductor = Drum(base_layer = 1, sacrificial_layer = 2 , top_layer = 3, outer_radius = 9, head_radius = 7, electrode_radius = 6, cable_width = 0.5, sacrificial_tail_width = 3, sacrificial_tail_length = 3, opening_width = 4, N_holes = 3, hole_angle = 45, hole_distance_to_center = 4.5, hole_distance_to_e
dge = 0.5, name = '') chip.add_component(inductor, (0,0)) chip.save_to_gds(show=False, save=True,loc='')
fro
m django.contrib import admin from django import forms from . import models from nnmarkdown.form import MarkdownWidget from nnscr.admin import site class PageAdminForm(forms.ModelForm): class Meta: model = models.Page exclude = ("slug",) widgets = { "text": MarkdownWidget } class PageAdmin(admin.ModelAdmin): form = PageAdminForm site.register(
models.Page, PageAdmin)
class Garden(object):
"""An object implementing a Kindergarten Garden.""" def __init__(self, cup_string, students=None): self.garden_rows = cup_string.split('\n') if students: self.class_list = sorted(students) else: self.class_list
= [ "Alice", "Bob", "Charlie", "David", "Eve", "Fred", "Ginny", "Harriet", "Ileana", "Joseph", "Kincaid", "Larry" ] self.plants_dict = { "R": "Radishes", "C": "Clover", "G": "Grass", "V": "Violets" } self.cups_per_child = 2 def plants(self, child_name): index = self.cups_per_child * self.class_list.index(child_name) child_plant_label_lists = [row[index:index + self.cups_per_child] for row in self.garden_rows] child_plant_labels = ''.join(child_plant_label_lists) child_plants = [self.plants_dict[label] for label in child_plant_labels] return child_plants
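# A minimal usage sketch for the Garden class above. The two-row cup string is
# illustrative exercise data, not taken from this file.

if __name__ == "__main__":
    garden = Garden("VRCGVVRVCGGCCGVRGCVCGCGV\nVRCCCGCRRGVCGCRVVCVGCGCV")
    # Alice owns the first two cups of each row ("VR" and "VR"), so:
    print(garden.plants("Alice"))   # ['Violets', 'Radishes', 'Violets', 'Radishes']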
# -*- coding: utf-8 -*- from nani.admin import TranslatableModelAdminMixin from nani.forms import translatable_inlineformset_factory from nani.forms import TranslatableModelForm, TranslatableModelFormMetaclass from nani.test_utils.context_managers import LanguageOverride from nani.test_utils.testcase import NaniTestCase from nani.test_utils.request_factory import RequestFactory from testproject.app.models import Normal, Related from django.db import models class TestBasicInline(NaniTestCase): def setUp(self): with LanguageOverride("en"): self.object = Normal.objects.language().create(shared_
field="test", translated_field="translated test") rf = RequestFactory() self.request = rf.post('/url/') def test_create_fields_inline(self): with LanguageOverride("en"): # Fixtures (should eventually be shared with other tests) translate_mixin = TranslatableModelAdminMixin()
formset = translatable_inlineformset_factory(translate_mixin._language(self.request), Normal, Related)(#self.request.POST, instance=self.object) self.assertTrue(formset.forms[0].fields.has_key("normal")) self.assertTrue(formset.forms[0].fields.has_key("translated")) self.assertTrue(formset.forms[0].fields.has_key("translated_to_translated")) self.assertFalse(formset.forms[0].fields.has_key("language_code"))
# coding=utf-8 # Copyright 2022 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for weight_symmetry.datasets.cifar10.""" from absl.testing import absltest import numpy as np from rigl.experimental.jax.datasets import cifar10 class CIFAR10DatasetTest(absltest.TestCase): """Test cases for CIFAR10 Dataset.""" def setUp(self): """Common setup routines/variables for test cases.""" super().setUp() self._batch_size = 16 self._batch_size_test = 10 self._shuffle_buffer_size = 8 self._dataset = cifar10.CIFAR10Dataset( self._batch_size, batch_size_test=self._batch_size_test, shuffle_buffer_size=self._shuffle_buffer_size) def test_create_dataset(self): """Tests creation of dataset.""" self.assertIsInstance(self._dataset, cifar10.CIFAR10Dataset) def test_train_image_dims_content(self): """Tests dimensions and contents of test data.""" iterator = self._dataset.get_train() sample = next(iterator) image, label = sample['image'], sample['label'] with self.subTest(name='DataShape'): self.assertTupleEqual(image.shape, (self._batch_size, 32, 32, 3)) with self.subTest(name='DataType'): self.assertTrue(np.issubdtype(image.dtype, np.float)) with self.subTest(name='DataValues'): # Normalized by stddev
., expect nothing to fall outside 3 stddev. self.assertTrue((image >= -3.).all() and (image <= 3.).all()) with self.subTest(name='LabelShape'): self.assertLen(label, self._batch_size) with self.subTest(name='LabelType'): self.assertTrue(np.issubdtype(label.dtype, np.int)) with self.subTest(name='LabelValues'): self.assertTrue((label >= 0).all() and (label <= self._dataset.num_classes).all())
def test_test_image_dims_content(self): """Tests dimensions and contents of train data.""" iterator = self._dataset.get_test() sample = next(iterator) image, label = sample['image'], sample['label'] with self.subTest(name='DataShape'): self.assertTupleEqual(image.shape, (self._batch_size_test, 32, 32, 3)) with self.subTest(name='DataType'): self.assertTrue(np.issubdtype(image.dtype, np.float)) with self.subTest(name='DataValues'): # Normalized by stddev., expect nothing to fall outside 3 stddev. self.assertTrue((image >= -3.).all() and (image <= 3.).all()) with self.subTest(name='LabelShape'): self.assertLen(label, self._batch_size_test) with self.subTest(name='LabelType'): self.assertTrue(np.issubdtype(label.dtype, np.int)) with self.subTest(name='LabelValues'): self.assertTrue((label >= 0).all() and (label <= self._dataset.num_classes).all()) def test_train_data_length(self): """Tests length of training dataset.""" total_count = 0 for batch in self._dataset.get_train(): total_count += len(batch['label']) self.assertEqual(total_count, self._dataset.get_train_len()) def test_test_data_length(self): """Tests length of test dataset.""" total_count = 0 for batch in self._dataset.get_test(): total_count += len(batch['label']) self.assertEqual(total_count, self._dataset.get_test_len()) def test_dataset_nonevenly_divisible_batch_size(self): """Tests non-evenly divisible test batch size.""" with self.assertRaisesRegex( ValueError, 'Test data not evenly divisible by batch size: .*'): self._dataset = cifar10.CIFAR10Dataset( self._batch_size, batch_size_test=101) if __name__ == '__main__': absltest.main()
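# A hedged sketch of the dataset API exercised by the tests above; the batch
# sizes are illustrative, and only the constructor, get_train() and
# num_classes usage that appears in the tests is assumed.

def _peek_cifar10():
    dataset = cifar10.CIFAR10Dataset(16, batch_size_test=10, shuffle_buffer_size=8)
    batch = next(dataset.get_train())        # dict with 'image' and 'label'
    image, label = batch['image'], batch['label']
    # images are stddev-normalised 32x32x3 floats, labels lie between 0 and num_classes
    return image.shape, label.shape, dataset.num_classes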
# -*- coding: utf-8 -*- """ sphinx.util.parallel ~~~~~~~~~~~~~~~~~~~~ Parallel building utilities. :copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import traceback try: import multiprocessing import threading except ImportError: multiprocessing = threading = None from six.moves import queue from sphinx.errors import SphinxParallelError # our parallel functionality only works for the forking Process parallel_available = multiprocessing and (os.name == 'posix') class SerialTasks(object): """Has the same interface as ParallelTasks, but executes tasks directly.""" def __init__(self, nproc=1): pass def add_task(self, task_func, arg=None, result_func=None): if arg is not None: res = task_func(arg) else: res = task_func() if result_func: result_func(res) def join(self): pass class ParallelTasks(object): """Executes *nproc* tasks in parallel after forking.""" def __init__(self, nproc): self.nproc = nproc # list of threads to join when waiting for completion self._taskid = 0 self._threads = {} self._nthreads = 0 # queue of result objects to process
self.result_queue = queue.Queue() self._nprocessed = 0 # maps tasks to result functions self._result_funcs = {} # allow only "nproc" worker processes at once self._semaphore = threading.Semaphore(self.nproc) def _process(self, pipe, func, arg): try: if arg is None: ret = func() else: ret = func(arg)
pipe.send((False, ret)) except BaseException as err: pipe.send((True, (err, traceback.format_exc()))) def _process_thread(self, tid, func, arg): precv, psend = multiprocessing.Pipe(False) proc = multiprocessing.Process(target=self._process, args=(psend, func, arg)) proc.start() result = precv.recv() self.result_queue.put((tid, arg) + result) proc.join() self._semaphore.release() def add_task(self, task_func, arg=None, result_func=None): tid = self._taskid self._taskid += 1 self._semaphore.acquire() thread = threading.Thread(target=self._process_thread, args=(tid, task_func, arg)) thread.setDaemon(True) thread.start() self._nthreads += 1 self._threads[tid] = thread self._result_funcs[tid] = result_func or (lambda *x: None) # try processing results already in parallel try: tid, arg, exc, result = self.result_queue.get(False) except queue.Empty: pass else: del self._threads[tid] if exc: raise SphinxParallelError(*result) result_func = self._result_funcs.pop(tid)(arg, result) if result_func: result_func(result) self._nprocessed += 1 def join(self): while self._nprocessed < self._nthreads: tid, arg, exc, result = self.result_queue.get() del self._threads[tid] if exc: raise SphinxParallelError(*result) result_func = self._result_funcs.pop(tid)(arg, result) if result_func: result_func(result) self._nprocessed += 1 # there shouldn't be any threads left... for t in self._threads.values(): t.join() def make_chunks(arguments, nproc, maxbatch=10): # determine how many documents to read in one go nargs = len(arguments) chunksize = min(nargs // nproc, maxbatch) if chunksize == 0: chunksize = 1 nchunks, rest = divmod(nargs, chunksize) if rest: nchunks += 1 # partition documents in "chunks" that will be written by one Process return [arguments[i*chunksize:(i+1)*chunksize] for i in range(nchunks)]
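# A hedged usage sketch for ParallelTasks above (assumes parallel_available is
# True, i.e. multiprocessing on a POSIX host; square(), nproc=4 and the range
# of work items are illustrative).

def _parallel_example():
    def square(n):
        return n * n

    squares = {}

    def collect(arg, result):
        # ParallelTasks invokes result functions as result_func(arg, result)
        squares[arg] = result

    tasks = ParallelTasks(4)
    for n in range(10):
        tasks.add_task(square, arg=n, result_func=collect)
    tasks.join()           # blocks until every queued task has been processed
    return squares         # {0: 0, 1: 1, ..., 9: 81}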
#!/usr/bin/python # script find clusters of small RNA reads in the genome # version 3 - 24-12-2013 evolution to multiprocessing # Usage clustering.py <bowtie input> <output> <bowtie index> <clustering_distance> <minimum read number per cluster to be outputed> <collapse option> <extention value> <average_cluster_size> # <folding> <output format> import sys, subprocess, time from collections import defaultdict # required for some SmRNAwindow attributes (readDic) #from numpy import mean, std # required for some SmRNAwindow methods #from scipy import stats from smRtools import * import multiprocessing def clustering (Instance): def clustermining (cluster, Instance): # cluster argument is a list if Instance.readDict[-cluster[0]]: # test whether the first position in the cluster was reverse reads shift = max(Instance.readDict[-cluster[0]]) upstream_coord = cluster[0] - shift + 1 else: upstream_coord = cluster[0] if Instance.readDict[cluster[-1]]: # test whether the last position i
n the cluster was forward reads shift = max(Instance.readDict[cluster[-1]]) downstream_coord = cluster[-1] + shift -1 else: downstream_coord = cluster[-1] readcount = Instance.readcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) mea
n_size, median_size, stdv_size = Instance.statsizes(upstream_coord=upstream_coord, downstream_coord=downstream_coord) if readcount >= minimum_reads and median_size >= min_median_size: location = [Instance.gene.split()[0], upstream_coord, downstream_coord] if output_format == "intervals": return "%s\t%s\t%s\t%s" % (location[0], location[1], location[2], readcount) cluster_size = downstream_coord - upstream_coord + 1 if folding == "yes" and cluster_size < 151: foldEnergy = Instance.foldEnergy(upstream_coord=upstream_coord, downstream_coord=downstream_coord) ## be careful, test ! else: foldEnergy = "." forwardReadcount = Instance.forwardreadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) # reverseReadcount = Instance.reversereadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) # density = readcount / float(cluster_size) # if output_format == "GFF3": if forwardReadcount >= reverseReadcount: GFFstrand = "+" else: GFFstrand = "-" Attributes = "ID=RC %s : FR %s : RR %s : Dens %s : Med %s : FE %s" % (readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy) return "%s\tGalaxy\tRead_Cluster\t%s\t%s\t%s\t%s\t.\t%s" % (location[0], location[1], location[2], readcount, GFFstrand, Attributes) else: Forward_Barycenter, Reverse_Barycenter = Instance.barycenter(upstream_coord=upstream_coord, downstream_coord=downstream_coord) Zsignature = Instance.signature(24,29,24,29,range(1,27), zscore="yes", upstream_coord=upstream_coord, downstream_coord=downstream_coord)[10] # Hsignature = Instance.hannon_signature(24,29,24,29, range(1,27), upstream_coord=upstream_coord, downstream_coord=downstream_coord )[10] * 100 UpiFreq = Instance.Ufreq(range(24,29), upstream_coord=upstream_coord, downstream_coord=downstream_coord) UsiFreq = Instance.Ufreq(range(20,22), upstream_coord=upstream_coord, downstream_coord=downstream_coord) return "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (location[0], location[1], location[2], cluster_size, readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy, Forward_Barycenter, Reverse_Barycenter, Zsignature, Hsignature, UpiFreq, UsiFreq) return False l = Instance.readDict.keys() l=[abs(i) for i in l] l=list(set(l)) l.sort() upstream = 0 cluster_list = [] for i, element in enumerate (l[1:]): if abs(element-l[i]) > dist or i+2==len(l): # the 2nd part of the logical test is to capture the last cluster if it overlaps the end of the list cluster = l[upstream:i+1] upstream = i+1 cluster_list.append(cluster) result_list = [] for i in cluster_list: totestresult = clustermining (i, Instance) if totestresult: result_list.append(totestresult) del Instance # return result_list def logtask (results): global number_of_clusters if results: number_of_clusters += len(results) LOG.append(results) return if __name__ == '__main__': start_time = time.time() fasta_dic = get_fasta (sys.argv[3]) objDic = {} number_of_reads = 0 F = open (sys.argv[1], "r") # F is the bowtie output taken as input for line in F: number_of_reads += 1 fields = line.split() polarity = fields[1] gene = fields[2] offset = int(fields[3]) size = len (fields[4]) try: objDic[gene].addread (polarity, offset, size) except KeyError: objDic[gene] = SmRNAwindow(gene, fasta_dic[gene]) objDic[gene].addread (polarity, offset, size) F.close() OUT = open (sys.argv[2], "w") output_format=sys.argv[8] if output_format == "intervals": print >> OUT, "#chrom\tStart\tEnd\tReadCount" elif output_format == "GFF3": print >> OUT, "##gff-version 3" 
else: print >> OUT, "#ID\t#chrom\tStart\tEnd\tLength\tReadCount\tForwardReads\tReverseReads\tDensity\tMedian\tFoldEnergy\tForBar\tRevBar\tz-score_signature\tHannon_signature\tUfreq_in_24-28RNAs\tUfreq_in_20-21RNs" dist = int(sys.argv[4]) min_median_size = int(sys.argv[6]) minimum_reads = int(sys.argv[5]) number_of_clusters = 0 Instance_ID = 0 folding=sys.argv[7] pool = multiprocessing.Pool(4) LOG = [] instance_list = [] for instance in objDic.keys(): instance_list.append(objDic[instance]) del objDic pool.map_async(clustering, instance_list, callback=logtask) pool.close() pool.join() for lines in LOG: for line in lines: print >> OUT, line OUT.close() elapsed_time = time.time() - start_time print "number of reads: %s\nnumber of clusters: %s\ntime: %s" % (number_of_reads, number_of_clusters, elapsed_time)
from ConfigParser import SafeConfigParser, NoSectionError import json import logging import os import sys import deimos.argv import deimos.docker from deimos.logger import log import deimos.logger from deimos._struct import _Struct def load_configuration(f=None, interactive=sys.stdout.isatty()): error = None defaults = _Struct(docker=Docker(), index=DockerIndex(), containers=Containers(), uris=URIs(), state=State(), log=Log( console=(logging.DEBUG if interactive else None), syslog=(logging.INFO if not interactive else None) )) parsed = None try: f = f if f else path() if f: parsed = parse(f) except Exception as e: error = e finally: confs = defaults.merge(parsed) if parsed else defaults deimos.logger.initialize(**dict(confs.log.items())) if error: pre = ("Error loading %s: " % f) if f else "" log.exception(pre + str(error)) sys.exit(16) if parsed: log.info("Loaded configuration from %s" % f) for _, conf in parsed.items(): log.debug("Found: %r", conf) return confs def coercearray(array): if type(array) in deimos.argv.strings: if array[0:1] != "[": return [array] try: arr = json.loads(array) if type(arr) is not list: raise ValueError() return arr except: raise ValueError("Not an array: %s" % array) return list(array) def coerceloglevel(level): if not level: return if type(level) is int: return level levels = {"DEBUG": logging.DEBUG, "INFO": logging.INFO, "WARNING": logging.WARNING, "ERROR": logging.ERROR, "CRITICAL": logging.CRITICAL, "NOTSET": logging.NOTSET} try: return levels[level] except: raise ValueError("Not a log level: %s" % level) def coercebool(b): if type(b) is bool: return b try: bl = json.loads(b) if type(bl) is not bool: raise ValueError() return bl except: raise ValueError("Not a bool: %s" % b) def coerceoption(val): try: return coercearray(val) except: return coercebool(val) class Image(_Struct): def __init__(self, default=None, ignore=False): _Struct.__init__(self, default=default, ignore=coercebool(ignore)) def override(self, image=None): return image if (image and not self.ignore) else self.default class Options(_Struct): def __init__(self, default=[], append=[], ignore=False): _Struct.__init__(self, default=coercearray(default),
append=coercearray(append), ignore=coercebool(ignore)) def override(self, options=[]): a = options if (len(options) > 0 and not self.ignore) else self.default return a + self.append class Containers(_Struct): def __init__(self, image=Image(), options=Options()):
_Struct.__init__(self, image=image, options=options) def override(self, image=None, options=[]): return self.image.override(image), self.options.override(options) class URIs(_Struct): def __init__(self, unpack=True): _Struct.__init__(self, unpack=coercebool(unpack)) class Log(_Struct): def __init__(self, console=None, syslog=None): _Struct.__init__(self, console=coerceloglevel(console), syslog=coerceloglevel(syslog)) class Docker(_Struct): def __init__(self, **properties): for k in properties.keys(): properties[k] = coerceoption(properties[k]) _Struct.__init__(self, **properties) def argv(self): return deimos.argv.argv(**dict(self.items())) class DockerIndex(_Struct): def __init__(self, index=None, account_libmesos="libmesos", account=None, dockercfg=None): _Struct.__init__(self, index=index, account_libmesos=account_libmesos, account=account, dockercfg=dockercfg) class State(_Struct): def __init__(self, root="/tmp/deimos"): if ":" in root: raise ValueError("Deimos root storage path must not contain ':'") _Struct.__init__(self, root=root) def parse(f): config = SafeConfigParser() config.read(f) parsed = {} sections = [("log", Log), ("state", State), ("uris", URIs), ("docker", Docker), ("docker.index", DockerIndex), ("containers.image", Image), ("containers.options", Options)] for key, cls in sections: try: parsed[key] = cls(**dict(config.items(key))) except: continue containers = {} if "containers.image" in parsed: containers["image"] = parsed["containers.image"] del parsed["containers.image"] if "containers.options" in parsed: containers["options"] = parsed["containers.options"] del parsed["containers.options"] if len(containers) > 0: parsed["containers"] = Containers(**containers) if "docker.index" in parsed: parsed["index"] = parsed["docker.index"] del parsed["docker.index"] return _Struct(**parsed) def path(): for p in search_path: if os.path.exists(p): return p search_path = ["./deimos.cfg", os.path.expanduser("~/.deimos"), "/etc/deimos.cfg", "/usr/etc/deimos.cfg", "/usr/local/etc/deimos.cfg"]
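# A hedged sketch of an INI file that parse() above understands. The section
# names match the `sections` table in parse(); the individual keys and values
# are purely illustrative.
#
#   [docker]
#   host: unix:///var/run/docker.sock
#
#   [docker.index]
#   index: docker.io
#   account: libmesos
#
#   [containers.image]
#   default: ubuntu:14.04
#   ignore: false
#
#   [containers.options]
#   append: ["--privileged"]
#
#   [uris]
#   unpack: true
#
#   [state]
#   root: /tmp/deimos
#
#   [log]
#   console: DEBUG
#   syslog: INFO
#
# load_configuration() locates such a file via path() (e.g. ./deimos.cfg or
# /etc/deimos.cfg) and merges the parsed sections over the built-in defaults.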
#!/usr/bin/python ''' Title: Hangman Description: A Simple Hangman Game Author: Usman Sher (@usmansher) Disclaimer: Its Just A Small Guessing Game made By Me (Beginning Of Coding). ''' # Imports import pygame, sys from pygame.locals import * from random import choice # Color Variables RED = (255, 0, 0) GREEN = (0, 255, 0) ORANGE = (255, 100, 0) BLUE = (0, 0, 255) WHITE = (255, 255, 255) BLACK = (0, 0, 0) # Get The Words From a Text File def getWords(): f = open('wordlist.txt') words = [] getLines = f.readline() while getLines: words.append(getLines.strip()) getLines = f.readline() return words # Word Spaces def drawWordSpaces(screen, spaces): x = 10 for i in range(spaces): pygame.draw.line(screen, ORANGE, (x, 350), (x+20, 350), 3) x += 30 # Letters def drawLetter(screen, font, word, guess): x = 10 for letter in word: if letter == guess: letter = font.render(letter, 3, BLACK) screen.blit(letter, (x, 300)) x += 30 # Gallows def drawGallows(screen): pyga
me.draw.rect(screen, BLUE, (450, 350, 100, 10)) pygame.draw.rect(screen, BLUE, (495, 250, 10, 100)) pygame.draw.rect(screen, BLUE, (450, 250, 50, 10)) pygame.draw.rect(screen, BLUE, (450, 250, 10, 25)) # Body Parts def drawMan(screen, bodyPart): if bodyPart ==
'head': pygame.draw.circle(screen, RED, (455, 285), 10) if bodyPart == 'body': pygame.draw.rect(screen, RED, (453, 285, 4, 50)) if bodyPart == 'lArm': pygame.draw.line(screen, RED, (455, 310), (445, 295), 3) if bodyPart == 'rArm': pygame.draw.line(screen, RED, (455, 310), (465, 295), 3) if bodyPart == 'lLeg': pygame.draw.line(screen, RED, (455, 335), (445, 345), 3) if bodyPart == 'rLeg': pygame.draw.line(screen, RED, (455, 335), (465, 345), 3) # The Main Function def main(): x = 800 y = 500 pygame.init() # Initialize Pygame screen = pygame.display.set_mode((x, y)) # Set The Screen Size pygame.display.set_caption('Hangman By Usman Sher') screen.fill(WHITE) # Fill The Background font = pygame.font.SysFont('Courier New', 40) # Set Font & Size drawGallows(screen) # Draw The Gallows guessed = '' words = getWords() # Get Words word = choice(words) # Get one word from words drawWordSpaces(screen, len(word)) # Draw The Word Spaces print word body = ['rLeg', 'lLeg', 'rArm', 'lArm', 'body', 'head'] # Body Parts correct = '' unique = set(word)# Get Unique Words from the Word pygame.display.update()# Update The Display while body and len(correct) < len(unique): # While Bodyparts or Correct Guess is less than Unique Words # Keyboard Events for event in pygame.event.get(): # Enable the Quit Button if event.type == QUIT: sys.exit() # If Key is pressed if event.type == KEYDOWN: # Check Whether Its a Alphabet or not if event.unicode.isalpha(): guess = event.unicode #Store Alphabet in variable guess # Check Whether Guessed Word is Right Or Wrong if guess in word and guess not in correct: #if it is drawLetter(screen, font, word, guess) #Print The Letter on Screen pygame.display.update() # Update The Display correct += guess # Add Guessed Letter to Correct elif guess not in guessed: # If Its Wrong bodyPart = body.pop() # Delete a Bodypart and add it the the variable bodyPart drawMan(screen, bodyPart) # Draw the Man with the Popped Bodypart pygame.display.update() # Update the Display guessed += guess # Add it to variable guessed if body: # Check Whether theres a part left in variable body text = 'You Won!'# If True else: text = 'You Lose! The word was '+ word # If False # print the Text endMessage = font.render(text, 3, BLACK) screen.blit(endMessage, (0, 0)) pygame.display.update() # Enable Quit Button while True: for event in pygame.event.get(): if event.type == QUIT: sys.exit() # Run The Program if __name__ == '__main__': main()
# YouTube Video: https://www.youtube.com/watch?v=wlnx-7cm4Gg from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream import twitter_credentials # # # # TWITTER STREAMER # # # # class TwitterStreamer(): """ Class for streaming and processing live tweets. """ def __init__(self): pass def stream_tweets(self, fetched_tweets_filename, hash_tag_list): # This handles Twitter authentication and the connection to Twitter Streaming API listener = StdOutListener(fetched_tweets_filename) auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET) auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET) stream = Stream(auth, listener) # This line filters Twitter Streams to capture data by the keywords: stream.filter(track=hash_tag_list) # # # # TWITTER STREAM LISTENER # # # # class StdOutListener(StreamListener): """ This is a basic listener that just prints received tweets to stdout. """ def __init__(self, fetched_tweets_filename): self.fetched_tweets_filename = fetched_tweets_filename def on_data(self, data): try: print(data)
with open(self.fetched_tweets_filename, 'a') as tf: tf.write(data) return True except Bas
eException as e: print("Error on_data %s" % str(e)) return True def on_error(self, status): print(status) if __name__ == '__main__': # Authenticate using twitter_credentials.py and connect to the Twitter Streaming API. hash_tag_list = ["donald trump", "hillary clinton", "barack obama", "bernie sanders"] fetched_tweets_filename = "tweets.txt" twitter_streamer = TwitterStreamer() twitter_streamer.stream_tweets(fetched_tweets_filename, hash_tag_list)
#!/usr/bin/python import sys sys.path.append('/usr/share/mandriva/') from mcc2.backends
.services.service impor
t Services if __name__ == '__main__': Services.main()
from flask import Flask, request, redirect, render_template, session, flash from mysqlconnection import MySQLConnector import re app = Flask(__name__) mysql = MySQLConnector(app, 'emailval') app.secret_key = 'secret' EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$') @app.route('/') def validation(): return render_template('validation.html') @app.route('/emails', methods=['POST']) def email(): if not EMAIL_REGEX.match(request.form['buttonbox']): flash('invalid emale') return redirect('/') else: flash ('Great Job!'); query = "INSERT INTO email (email,updated_at,created_at) VALUES (:email,NOW(),NOW())" data = {'email':request.form['buttonbox']} mysql.query_db(query,data) query = "SELECT created_at FROM email" query = "SELECT * FROM email" email = mysql.query_db(query) # if len(request.form['buttonbox']) < 1: # flash('need a proper emale') return render_template('email.html', email = email) # @app.rou
te('/emails') # def show(email_id): # query = "SELECT * FROM email WHERE id = :specific_id" # data = {'specific_id': email_id} # emails = mysql.query_db(query, data) # return render_template('email.html', email = email) @a
pp.route('/delete/<id>') def delete(id): query = "DELETE FROM email WHERE id = :id" data = {'id': id} mysql.query_db(query, data) flash("The email address ID {} has been deleted".format(id)) query = "SELECT * FROM email" email = mysql.query_db(query) return render_template('email.html', email = email) app.run(debug=True)
me of the MUMmer program that produced the output - query: path to the query sequence file - subject: path to the subject sequence file """ def __init__(self, name, handle=None): self.name = name self._metadata = None self._comparisons = [] if handle is not None: self.from_delta(handle) def from_delta(self, handle): """Populate the object from the passed .delta or .filter filehandle""" parser = DeltaIterator(handle) for element in parser: if isinstance(element, DeltaMetadata): self._metadata = element if isinstance(element, DeltaComparison): self._comparisons.append(element) @property def comparisons(self): """Comparisons in the .delta file""" return self._comparisons @property def metadata(self): """Metadata from the .delta file""" return self._metadata @property def reference(self): """The reference file for the MUMmer comparison""" return self._metadata.reference @property def program(self): """The MUMmer program used for the comparison""" return self._metadata.program @property def query(self): """The query file for the MUMmer comparison""" return self._metadata.query def __eq__(self, other): # We do not enforce equality of metadata, as the full path to both query and reference is # written in the .delta file, and we care only about the alignment data, and the program # that was used. if not isinstance(other, DeltaData): return False return (self.program == other.program) and ( self._comparison
s == other._comparisons ) def __len__(self): return len(self._comparisons) def __str__(self): """Return the object in .delta format output""" outstr = os.linesep.join( [str(self._metadata)] + [str(_) for _ in self._compar
isons] ) return outstr class DeltaMetadata(object): """Represents the metadata header for a MUMmer .delta file""" def __init__(self): self.reference = None self.query = None self.program = None def __eq__(self, other): if not isinstance(other, DeltaMetadata): return False return (self.reference, self.query, self.program) == ( other.reference, other.query, other.program, ) def __str__(self): return "{} {}{}{}".format(self.reference, self.query, os.linesep, self.program) class DeltaComparison(object): """Represents a comparison between two sequences in a .delta file""" def __init__(self, header, alignments): self.header = header self.alignments = alignments def add_alignment(self, aln): """Add passed alignment to this object :param aln: DeltaAlignment object """ self.alignments.append(aln) def __eq__(self, other): if not isinstance(other, DeltaComparison): return False return (self.header == other.header) and ( sorted(self.alignments) == sorted(other.alignments) ) def __len__(self): return len(self.alignments) def __str__(self): outstr = os.linesep.join([str(self.header)] + [str(_) for _ in self.alignments]) return outstr class DeltaHeader(object): """Represents a single sequence comparison header from a MUMmer .delta file""" def __init__(self, reference, query, reflen, querylen): self.reference = reference self.query = query self.referencelen = int(reflen) self.querylen = int(querylen) def __eq__(self, other): if not isinstance(other, DeltaHeader): return False return (self.reference, self.query, self.referencelen, self.querylen) == ( other.reference, other.query, other.referencelen, other.querylen, ) def __str__(self): return ">{} {} {} {}".format( self.reference, self.query, self.referencelen, self.querylen ) class DeltaAlignment(object): """Represents a single alignment region and scores for a pairwise comparison""" def __init__(self, refstart, refend, qrystart, qryend, errs, simerrs, stops): self.refstart = int(refstart) self.refend = int(refend) self.querystart = int(qrystart) self.queryend = int(qryend) self.errs = int(errs) self.simerrs = int(simerrs) self.stops = int(stops) self.indels = [] def __lt__(self, other): return (self.refstart, self.refend, self.querystart, self.queryend) < ( other.refstart, other.refend, other.querystart, other.queryend, ) def __eq__(self, other): return (self.refstart, self.refend, self.querystart, self.queryend) == ( other.refstart, other.refend, other.querystart, other.queryend, ) def __str__(self): outstr = [ "{} {} {} {} {} {} {}".format( self.refstart, self.refend, self.querystart, self.queryend, self.errs, self.simerrs, self.stops, ) ] + [str(_) for _ in self.indels] return os.linesep.join(outstr) class DeltaIterator(object): """Iterator for MUMmer .delta files. 
Returns a stream of DeltaMetadata, DeltaComparison and DeltaAlignment objects when iterated over a filehandle The .delta file structure and format is described at http://mummer.sourceforge.net/manual/#nucmeroutput """ def __init__(self, handle): """Instantiate the class with the passed filehandle""" self._handle = handle self._metadata = None # metadata for a .delta file self._header = None # header information for a pairwise comparison self._comparison = None # current comparison region def __iter__(self): """Iterate over elements of the .delta file as DeltaHeader and DeltaAlignment objects""" return iter(self.__next__, None) def __next__(self): """Parse the next element from the .delta file""" # Parse .delta file metadata if self._metadata is None: self._metadata = DeltaMetadata() self._metadata.reference, self._metadata.query = ( self._handle.readline().strip().split() ) self._metadata.program = self._handle.readline().strip() return self._metadata # Parse remaining lines into a DeltaHeader for each comparison, and corresponding # DeltaAlignments line = self._handle.readline() while line: # If we're at the start of an alignment, create a new DeltaAlignment if line.startswith(">"): if self._comparison is not None: return self._comparison self._header = DeltaHeader(*(line[1:].split())) self._comparison = DeltaComparison(self._header, []) # Populate the current pairwise alignment with each individual alignment else: alndata = line.rstrip().split() if len(alndata) > 1: # alignment header alignment = DeltaAlignment(*alndata) elif alndata[0] == "0": alignment.indels.append(alndata[0]) self._comparison.add_alignment(alignment) else: alignment.indels.append(alndata[0]) # Get the next line and return the final comparison if we're at the end of file line = self._handle.readline() if not line: return self._comparison # Generate list of Job objects, one per NUCmer run def generate_nucmer_jobs( groupdata, outdir, nucmer_exe, filter_exe, maxmatch=False, jobprefix="PDPNUCmer" ):
from rest_framework import routers from . import views class SecretsRootView(routers.APIRootView): """ Secrets API root view """ def get_view_name(self): return 'Secrets' router = rout
ers.DefaultRouter() r
outer.APIRootView = SecretsRootView # Field choices router.register(r'_choices', views.SecretsFieldChoicesViewSet, basename='field-choice') # Secrets router.register(r'secret-roles', views.SecretRoleViewSet) router.register(r'secrets', views.SecretViewSet) # Miscellaneous router.register(r'get-session-key', views.GetSessionKeyViewSet, basename='get-session-key') router.register(r'generate-rsa-key-pair', views.GenerateRSAKeyPairViewSet, basename='generate-rsa-key-pair') app_name = 'secrets-api' urlpatterns = router.urls
import requests import hashlib import os import json USERNAME = 'christine' API_KEY = 'd0e4164c2bd99f1f888477fc25cf8c5c104a5cd1' #Read in t
he path with user input (or navigate to the directory in the GUI)
#path = '/home/wildcat/Lockheed/laikaboss/malware/' os.chdir("/home/wildcat/Lockheed/laikaboss") print("Hint: /home/wildcat/Lockheed/laikaboss/malware/") path = raw_input("Enter the path of your file: ") for f in os.listdir(path): os.system("sudo python laika.py {} | jq '.scan_result[]' > /home/wildcat/Lockheed/crits/pyscript/mal3/{}.out".format(os.path.join(path,f), f)) os.chdir("/home/wildcat/Lockheed/crits/pyscript/mal3/") path2 = "/home/wildcat/Lockheed/crits/pyscript/mal3/" for f in os.listdir(path2): read_data = open(f,'r') md5_data = json.load(read_data) file_data = open(f, 'r').read() md5 = md5_data['moduleMetadata']['META_HASH']['HASHES']['md5'] data = {'upload_type': 'metadata', 'filename': f, 'md5': md5, 'source': 'Christine'} files = {'filedata': open(f, 'rb')} url = 'http://localhost:8080/api/v1/samples/?username={0}&api_key={1}'.format(USERNAME, API_KEY) r = requests.post(url, data=data, files=files)
#!/usr/bin/env python # coding: utf-8 import sys import time from twisted.internet import defer, reactor from twisted.python import log import txmongo def getConnection(): print "getting connection..." return txmongo.MongoConnectionPool() def getDatabase(conn, dbName): print "getting database..." return getattr(co
nn, dbName) def getCollection(db, collName): print "getting collection..." return getattr(db, collName) def insertData(coll): print "inserting data..." # insert some data, building a deferred list so that we can later check
# the succes or failure of each deferred result deferreds = [] for x in xrange(10000): d = coll.insert({"something":x*time.time()}, safe=True) deferreds.append(d) return defer.DeferredList(deferreds) def processResults(results): print "processing results..." failures = 0 successes = 0 for success, result in results: if success: successes += 1 else: failures += 1 print "There were %s successful inserts and %s failed inserts." % ( successes, failures) def finish(ignore): print "finishing up..." reactor.stop() def example(): d = getConnection() d.addErrback(log.err) d.addCallback(getDatabase, "foo") d.addCallback(getCollection, "test") d.addCallback(insertData) d.addErrback(log.err) d.addCallback(processResults) d.addErrback(log.err) d.addCallback(finish) return d if __name__ == '__main__': log.startLogging(sys.stdout) example() reactor.run()
elect the option in the next line option_pars['l_linear'] = True # Alfven speed constant along the axis of the flux tube if option_pars['l_const']: option_pars['l_B0_quadz'] = True model_pars['chrom_scale'] *= 5e1 model_pars['p0'] *= 1.5e1 physical_constants['gravity'] *= 1. model_pars['radial_scale'] *= 1. # Alfven speed proportional to sqrt(Z) along the axis of the flux tube elif option_pars['l_sqrt']: option_pars['l_B0_rootz'] = True model_pars['chrom_scale'] *= 5.65e-3 model_pars['p0'] *= 1. physical_constants['gravity'] *= 7.5e3 model_pars['radial_scale'] *= 0.7 # Alfven speed proportional to Z along the axis of the flux tube elif option_pars['l_linear']: option_pars['l_B0_rootz'] = True model_pars['chrom_scale'] *= 0.062 model_pars['p0'] *= 3e2 physical_constants['gravity'] *= 8e3 model_pars['radial_scale'] *= 1. # Alfven speed proportional to Z^2 along the axis of the flux tube elif option_pars['l_square']: option_pars['l_B0_rootz'] = True model_pars['chrom_scale'] *= 1.65 model_pars['p0'] *= 2e4 physical_constants['gravity'] *= 5e4 model_pars['radial_scale'] *= 1. # Alfven speed not defined along the axis of the flux tube else: option_pars['l_B0_rootz'] = True model_pars['chrom_scale'] *= 1. model_pars['p0'] *= 1. #obtain code coordinates and model parameters in astropy units coords = atm.get_coords(model_pars['Nxyz'], u.Quantity(model_pars['xyz'])) #============================================================================== #calculate 1d hydrostatic balance from empirical density profile #============================================================================== pressure_Z, rho_Z, Rgas_Z = atm.get_spruit_hs(coords['Z'], model_pars, physical_constants, option_pars ) #============================================================================== # load flux tube footpoint parameters #============================================================================== # axial location and value of Bz at each footpoint xi, yi, Si = atm.get_flux_tubes( model_pars, coords, option_pars ) #============================================================================== # split domain into processes if mpi #============================================================================== ax, ay, az = np.mgrid[coords['xmin']:coords['xmax']:1j*model_pars['Nxyz'][0], coords['ymin']:coords['ymax']:1j*model_pars['Nxyz'][1], coords['zmin']:coords['zmax']:1j*model_pars['Nxyz'][2]] # split the grid between processes for mpi if l_mpi: x_chunks = np.array_split(ax, size, axis=0) y_chunks = np.array_split(ay, size, axis=0) z_chunks = np.array_split(az, size, axis=0) x = comm.scatter(x_chunks, root=0) y = comm.scatter(y_chunks, root=0) z = comm.scatter(z_chunks, root=0) else: x, y, z = ax, ay, az x = u.Quantity(x, unit=coords['xmin'].unit) y = u.Quantity(y, unit=coords['ymin'].unit) z = u.Quantity(z, unit=coords['zmin'].unit) #============================================================================== # initialize zero arrays in which to add magnetic field and mhs adjustments #============================================================================== Bx = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic x-component By = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic y-component Bz = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic z-component pressure_m = u.Quantity(np.zeros(x.shape), unit=u.Pa) # magneto-hydrostatic adjustment to pressure rho_m = u.Quantity(np.zeros(x.shape), unit=u.kg/u.m**3) # magneto-hydrostatic adjustment to density # initialize zero arrays in which to add balancing 
forces and magnetic tension Fx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) # balancing force x-component Fy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) # balancing force y-component # total tension force for comparison with residual balancing force Btensx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) Btensy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) #============================================================================== #calculate the magnetic field and pressure/density balancing expressions #============================================================================== for i in range(0,model_pars['nftubes']): for j in range(i,model_pars['nftubes']): if rank == 0: print'calculating ij-pair:',i,j if i == j: pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y =\ atm.construct_magnetic_field( x, y, z, xi[i], yi[i], Si[i], model_pars, option_pars, physical_constants, scales ) Bx, By, Bz = Bxi+Bx, Byi+By ,Bzi+Bz Btensx += B2x Btensy += B2y pressure_m += pressure_mi rho_m += rho_mi else: pressure_mi, rho_mi, Fxi, Fyi, B2x, B2y =\ atm.construct_pairwise_field( x, y, z, xi[i], yi[i], xi[j], yi[j], Si[i], Si[j], model_pars, option_pars, physical_constants, scales ) pressure_m += pressure_mi rho_m += rho_mi Fx += Fxi Fy += Fyi Btensx += B2x Btensy += B2y # clear some memory del pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y #============================================================================== # Construct 3D hs arrays and then add the mhs adjustments to obtain atmosphere #============================================================================== # select the 1D array spanning the local mpi process; the add/sub of dz to # ensure all indices are used, but only once indz = np.where(coords['Z'] >= z.min()-0.1*coords['dz']) and \ np.where(coords['Z'] <= z.max()+0.1*coords['dz']) pressure_z, rho_z, Rgas_z = pressure_Z[indz], rho_Z[indz], Rgas_Z[indz] # local proc 3D mhs arrays pressure, rho = atm.mhs_3D_profile(z, pressure_z, rho_z, pressure_m, rho_m ) magp = (Bx**2 + By**2 + Bz**2)/(2.*physical_constants['mu0']) if rank ==0: print'max B corona = ',magp[:,:,-1].max().decompose() energy = atm.get_internal_energy(pressure, magp, physical_constants) #============================================================================ # Save data for SAC and plotting #============================================================================ # set up data directory and file names # may be worthwhile locating on /data if files are large datadir = os.path.expanduser('~/Documents/mhs_atmosphere/'+model_pars['
mode
l']+'/') filename = datadir + model_pars['model'] + option_pars['suffix'] if not os.path.exists(datadir): os.makedirs(datadir) sourcefile = datadir + model_pars['model'] + '_sources' + option_pars['suffix'] aux3D = datadir + model_pars['model'] + '_3Daux' + option_pars['suffix'] aux1D = datadir + model_pars['model'] + '_1Daux' + option_pars['suffix'] # save the variables for the initialisation o
#!/usr/bin/env python # # Limitations: # - doesn't work if another node is using a persistent connection # - all names MUST be fully qualified, else rosservice will fail # # TODO: # - watch out for new services and tap them when they come online # - stop broadcasting a service when the original host dies? # # http://docs.ros.org/diamondback/api/rosservice/html/index.html import sys import inspect import rospy import std_srvs.srv import std_msgs.msg import rosgraph import rosservice import rospy.core import json from pprint import pprint as pp from rospy.impl.tcpros_base import TCPROSTransport # we use the most accurate timer available to the system from timeit import default_timer as timer """ All (tapped) service calls are broadcast to the /rec/srvs topic in a JSON format. The +queue_size+ parameter creates an asynchronous publisher, which is better suited to our needs (higher throughput) """ class ServiceTapper(object): """ Acts a proxy, forwarding a given service call onto its intended recepient, whilst logging details of the service call to the appropriate topic """ def __handler(self, server, service_name, proxy, req): time_start = timer() client = req._connection_header['callerid'] # generate a JSON-encodable description of the parameters for this request # TO
DO: will fail with complex, embedded objects params = {p: getattr(req, p) for p in req.__slots__} # send the request and wait for a response success = False try: ret = proxy(req) success = True response = {p: getattr(ret, p) for p in ret.__slots__} except rospy.ServiceException, e: success = False response = {'reason': e} # log the
service call finally: time_end = timer() time_duration = time_end - time_start log = { 'service': service_name, 'server': server, 'client': client, 'time_start': time_start, 'time_end': time_end, 'time_duration': time_duration, 'params': params, 'response': response, 'success': success } serviceCallPublisher.publish(json.dumps(log)) return ret """ Listens to all activity on a given service """ def listen_to(self, service_name): rospy.loginfo("Tapping service: {}".format(service_name)) # block until the service is available rospy.wait_for_service(service_name) # determine which node provides the given service server = rosservice.get_service_node(service_name) assert not server is None # get the class used by this service service_cls = rosservice.get_service_class_by_name(service_name) # create a persistent proxy to that service # inject a persistent connection into the proxy, so that when we replace # the original service, we can still forward messages onto the old one proxy = rospy.ServiceProxy(service_name, service_cls, persistent=True) # TODO: listen for failures # http://docs.ros.org/jade/api/rospy/html/rospy.impl.tcpros_service-pysrc.html#ServiceProxy service_uri = self.master.lookupService(proxy.resolved_name) (dest_addr, dest_port) = rospy.core.parse_rosrpc_uri(service_uri) proxy.transport = TCPROSTransport(proxy.protocol, proxy.resolved_name) proxy.transport.buff_size = proxy.buff_size proxy.transport.connect(dest_addr, dest_port, service_uri) # record the URI of the original service, so we can restore it later self.tapped[service_name] = service_uri # create a new, tapped service, with the same name tap = lambda r: self.__handler(server, service_name, proxy, r) rospy.Service(service_name, service_cls, tap) rospy.loginfo("Tapped service: {}".format(service_name)) """ Listens to all activity on all specified services """ def listen(self, services): rospy.loginfo("Tapping services...") services = rosservice.get_service_list(include_nodes=True) for (service, node) in services: # ignore irrelevant services if node == 'rostrace' or service.endswith('/get_loggers') or service.endswith('/set_logger_level'): continue self.listen_to(service) rospy.loginfo("Tapped services") """ Restores all tapped services to their original form. Must be called before the program is closed, otherwise those services will become unavailable. """ def restore(self): rospy.loginfo("Restoring services...") for (service_name, uri) in self.tapped.items(): rospy.loginfo("Restoring service: {}".format(service_name)) self.master.registerService(service_name, uri, uri) rospy.loginfo("Restored service: {}".format(service_name)) rospy.loginfo("Restored services") """ Constructs a new service tapper """ def __init__(self): self.master = rosgraph.Master('/roscore') self.publisher = \ rospy.Publisher('rec/srvs', std_msgs.msg.String, queue_size=10) self.tapped = {}
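# A hedged driver sketch for the ServiceTapper above. __handler publishes via a
# module-level `serviceCallPublisher`, so one is created here before tapping;
# the node name 'rostrace' mirrors the name listen() already skips, and the
# shutdown hook is illustrative.

if __name__ == '__main__':
    rospy.init_node('rostrace')
    serviceCallPublisher = rospy.Publisher('rec/srvs', std_msgs.msg.String, queue_size=10)
    tapper = ServiceTapper()
    rospy.on_shutdown(tapper.restore)   # put the original service URIs back on exit
    tapper.listen([])                   # listen() re-queries the master for the service list itself
    rospy.spin()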
sult["OK"]: return result failed.update(result["Value"]["Failed"]) successful = result["Value"]["Successful"] return S_OK({"Successful": successful, "Failed": failed}) def _getFileRelatives(self, lfns, depths, relation, connection=False): connection = self._getConnection(connection) failed = {} successful = {} result = self._findFiles(list(lfns), connection=con
nection) if not result["OK"]: return result if result["Value"]["Failed"]: failed.update(result["Value"]["Failed"]) for lfn in result["Value"]["Failed"]: lfns.pop(lfn)
if not lfns: return S_OK({"Successful": successful, "Failed": failed}) inputIDDict = {} for lfn in result["Value"]["Successful"]: inputIDDict[result["Value"]["Successful"][lfn]["FileID"]] = lfn inputIDs = list(inputIDDict) if relation == "ancestor": result = self._getFileAncestors(inputIDs, depths, connection) else: result = self._getFileDescendents(inputIDs, depths, connection) if not result["OK"]: return result failed = {} successful = {} relDict = result["Value"] for id_ in inputIDs: if id_ in relDict: result = self._getFileLFNs(list(relDict[id_])) if not result["OK"]: failed[inputIDDict[id]] = "Failed to find %s" % relation else: if result["Value"]["Successful"]: resDict = {} for aID in result["Value"]["Successful"]: resDict[result["Value"]["Successful"][aID]] = relDict[id_][aID] successful[inputIDDict[id_]] = resDict for aID in result["Value"]["Failed"]: failed[inputIDDict[id_]] = "Failed to get the ancestor LFN" else: successful[inputIDDict[id_]] = {} return S_OK({"Successful": successful, "Failed": failed}) def getFileAncestors(self, lfns, depths, connection=False): return self._getFileRelatives(lfns, depths, "ancestor", connection) def getFileDescendents(self, lfns, depths, connection=False): return self._getFileRelatives(lfns, depths, "descendent", connection) def _getExistingMetadata(self, lfns, connection=False): connection = self._getConnection(connection) # Check whether the files already exist before adding res = self._findFiles(lfns, ["FileID", "Size", "Checksum", "GUID"], connection=connection) if not res["OK"]: return res successful = res["Value"]["Successful"] failed = res["Value"]["Failed"] for lfn, error in list(failed.items()): if error == "No such file or directory": failed.pop(lfn) return S_OK((successful, failed)) def _checkExistingMetadata(self, existingLfns, lfns): failed = {} successful = {} fileIDLFNs = {} for lfn, fileDict in existingLfns.items(): fileIDLFNs[fileDict["FileID"]] = lfn # For those that exist get the replicas to determine whether they are already registered res = self._getFileReplicas(list(fileIDLFNs)) if not res["OK"]: for lfn in fileIDLFNs.values(): failed[lfn] = "Failed checking pre-existing replicas" else: replicaDict = res["Value"] for fileID, lfn in fileIDLFNs.items(): fileMetadata = existingLfns[lfn] existingGuid = fileMetadata["GUID"] existingSize = fileMetadata["Size"] existingChecksum = fileMetadata["Checksum"] newGuid = lfns[lfn]["GUID"] newSize = lfns[lfn]["Size"] newChecksum = lfns[lfn]["Checksum"] # Ensure that the key file metadata is the same if (existingGuid != newGuid) or (existingSize != newSize) or (existingChecksum != newChecksum): failed[lfn] = "File already registered with alternative metadata" # If the DB does not have replicas for this file return an error elif fileID not in replicaDict or not replicaDict[fileID]: failed[lfn] = "File already registered with no replicas" # If the supplied SE is not in the existing replicas return an error elif not lfns[lfn]["SE"] in replicaDict[fileID].keys(): failed[lfn] = "File already registered with alternative replicas" # If we get here the file being registered already exists exactly in the DB else: successful[lfn] = True return successful, failed def _checkUniqueGUID(self, lfns, connection=False): connection = self._getConnection(connection) guidLFNs = {} failed = {} for lfn, fileDict in lfns.items(): guidLFNs[fileDict["GUID"]] = lfn res = self._getFileIDFromGUID(list(guidLFNs), connection=connection) if not res["OK"]: return dict.fromkeys(lfns, res["Message"]) for guid, fileID in 
res["Value"].items(): # resolve this to LFN failed[guidLFNs[guid]] = "GUID already registered for another file %s" % fileID return failed def removeFile(self, lfns, connection=False): connection = self._getConnection(connection) """ Remove file from the catalog """ successful = {} failed = {} res = self._findFiles(lfns, ["DirID", "FileID", "Size"], connection=connection) if not res["OK"]: return res for lfn, error in res["Value"]["Failed"].items(): if error == "No such file or directory": successful[lfn] = True else: failed[lfn] = error fileIDLfns = {} lfns = res["Value"]["Successful"] for lfn, lfnDict in lfns.items(): fileIDLfns[lfnDict["FileID"]] = lfn res = self._computeStorageUsageOnRemoveFile(lfns, connection=connection) if not res["OK"]: return res directorySESizeDict = res["Value"] # Now do removal res = self._deleteFiles(list(fileIDLfns), connection=connection) if not res["OK"]: for lfn in fileIDLfns.values(): failed[lfn] = res["Message"] else: # Update the directory usage self._updateDirectoryUsage(directorySESizeDict, "-", connection=connection) for lfn in fileIDLfns.values(): successful[lfn] = True return S_OK({"Successful": successful, "Failed": failed}) def _computeStorageUsageOnRemoveFile(self, lfns, connection=False): # Resolve the replicas to calculate reduction in storage usage fileIDLfns = {} for lfn, lfnDict in lfns.items(): fileIDLfns[lfnDict["FileID"]] = lfn res = self._getFileReplicas(list(fileIDLfns), connection=connection) if not res["OK"]: return res directorySESizeDict = {} for fileID, seDict in res["Value"].items(): dirID = lfns[fileIDLfns[fileID]]["DirID"] size = lfns[fileIDLfns[fileID]]["Size"] directorySESizeDict.setdefault(dirID, {}) directorySESizeDict[dirID].setdefault(0, {"Files": 0, "Size": 0}) directorySESizeDict[dirID][0]["Size"] += size directorySESizeDict[dirID][0]["Files"] += 1 for seName in seDict.keys(): res = self.db.seManager.findSE(seName) if not res["OK"]: return res seID = res["Value"] size = lfns[fileIDLfns[fileID]]["Size"] directorySESizeDict[dirID].setdefault(seID, {"Files": 0, "Size": 0}) directorySESizeDict[dirID][seID]["Size"] += size directorySESizeDict[dirID][seID]["Files"] += 1 return
#!/usr/bin/env python """ This activity will calculate the average of ratios between CPU request and Memory request by each event type. These fields are optional and could be null. """ # It will connect to DataStoreClient from sciwonc.dataflow.DataStoreClient import DataStoreClient import ConfigDB_Average_0 # connector and config client = DataStoreClient("mongodb", ConfigDB_Average_0) config = ConfigDB_Average_0 # according to config dataList = client.getData() # return an array of docs (like a csv reader) output = [] if(dataList): for i in dat
aList: sum_ratio = 0 total_valid_tasks = 0 total_tasks = 0 event_type = i[config.COLUMN] while True: doc = i['data'].next() if doc is None: break
total_tasks += 1 if(doc['ratio cpu memory']): sum_ratio = sum_ratio + float(doc['ratio cpu memory']) total_valid_tasks += 1 newline = {} newline['event type'] = event_type newline['sum ratio cpu memory'] = sum_ratio newline['total valid tasks'] = total_valid_tasks newline['total tasks'] = total_tasks if((sum_ratio > 0) and (total_valid_tasks > 0)): newline['mean ratio cpu memory'] = sum_ratio / total_valid_tasks else: newline['mean ratio cpu memory'] = None output.append(newline) # save client.saveData(output)
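# Illustrative shape (not part of the original activity) of one record that the
# loop above appends to `output`; the 'EVICT' event type and the counts are
# hypothetical values used only to show the fields:
#
#   {'event type': 'EVICT',
#    'sum ratio cpu memory': 0.5,
#    'total valid tasks': 1,
#    'total tasks': 2,
#    'mean ratio cpu memory': 0.5}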
# -*- coding: utf-8 -*- from setuptools import setup, find_packages import django_cassandra_engine as meta DESCRIPTION = 'Django Cassandra Engine - the Cassandra backend for Django' try: with open('README.rst', 'rb') as f: LONG_DESCRIPTION = f.read().decode('utf-8') except IOError: with open('README.md', 'rb') as f: LONG_DESCRIPTION = f.read().decode('utf-8') with open('requirements.txt', 'r') as f: DEPENDENCIES = f.read().splitlines() setup( name='django-
cassandra-engine', version='.'.join(map(str, meta.__version__)
), author=meta.__author__, author_email=meta.__contact__, url=meta.__homepage__, keywords='django cassandra engine backend driver wrapper database nonrel ' 'cqlengine', download_url='https://github.com/r4fek/django-cassandra-engine/tarball/master', license='2-clause BSD', description=DESCRIPTION, long_description=LONG_DESCRIPTION, install_requires=DEPENDENCIES, packages=find_packages( exclude=['tests', 'tests.*', 'testproject', 'testproject.*']), test_suite='testproject.runtests.main', tests_require=['mock==1.0.1', 'django-nose'], zip_safe=False, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Environment :: Plugins', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Database', 'Topic :: Internet', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
__author__ = 'Nataly' from model.project import Project import string import random def random_string(prefix, maxlen): symbols = string
.ascii_letters return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))]) def test_add_project(app): project = Project(random_string("name_", 10), random_string("description_", 10)) old_list = app.soap.get_project_list() if project in old_list: app.project.delete_project(project) old_list = app.soap.ge
t_project_list() app.project.add_project(project) new_list = app.soap.get_project_list() old_list.append(project) assert sorted(old_list, key=Project.id_or_max) == sorted(new_list, key=Project.id_or_max)
#!/usr/bin/env python3 # Copyright
(c) 2020 The Bitcoin Core developers # Distributed under the MIT software
license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test fee filters during and after IBD.""" from decimal import Decimal from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework MAX_FEE_FILTER = Decimal(9170997) / COIN NORMAL_FEE_FILTER = Decimal(100) / COIN class P2PIBDTxRelayTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [ ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)], ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)], ] def run_test(self): self.log.info("Check that nodes set minfilter to MAX_MONEY while still in IBD") for node in self.nodes: assert node.getblockchaininfo()['initialblockdownload'] self.wait_until(lambda: all(peer['minfeefilter'] == MAX_FEE_FILTER for peer in node.getpeerinfo())) # Come out of IBD by generating a block self.generate(self.nodes[0], 1) self.sync_all() self.log.info("Check that nodes reset minfilter after coming out of IBD") for node in self.nodes: assert not node.getblockchaininfo()['initialblockdownload'] self.wait_until(lambda: all(peer['minfeefilter'] == NORMAL_FEE_FILTER for peer in node.getpeerinfo())) if __name__ == '__main__': P2PIBDTxRelayTest().main()
import os Basketball
PlayerDatabase = 'Basket
ballPlayerDatabase.p' Root_URL = 'https://' + os.getenv('basketball_root_url')
# Copyright 2015, Ansible, Inc. # Luke Sneeringer <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r
equired by applicable law or agreed to in writing, software # distributed under the License is distrib
uted on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import click class Command(click.Command): """A Command subclass that adds support for the concept that invocation without arguments assumes `--help`. This code is adapted by taking code from click.MultiCommand and placing it here, to get just the --help functionality and nothing else. """ def __init__(self, name=None, no_args_is_help=True, **kwargs): self.no_args_is_help = no_args_is_help super(Command, self).__init__(name=name, **kwargs) def parse_args(self, ctx, args): """Parse arguments sent to this command. The code for this method is taken from MultiCommand: https://github.com/mitsuhiko/click/blob/master/click/core.py It is Copyright (c) 2014 by Armin Ronacher. See the license: https://github.com/mitsuhiko/click/blob/master/LICENSE """ if not args and self.no_args_is_help and not ctx.resilient_parsing: click.echo(ctx.get_help()) ctx.exit() return super(Command, self).parse_args(ctx, args)
onth) senc_page_retry_times = self.max_retry while True: senc_page_retry_times -= 1 key, level, call_log_history, wrong_flag = self.deal_call_log('1', kwargs['tel'], query_month) if level == -1: month_missing += 1 possibly_missing_list.append(query_month)
break elif level != 0: now_time = time.time() if senc_p
age_retry_times >0: continue elif now_time<end_time: time.sleep(rand_time) else: missing_list.append(query_month) if wrong_flag == 'website': website_num += 1 elif wrong_flag == 'crawler': crawler_num += 1 break else: call_log.extend(call_log_history) break missing_list = list(set(missing_list)) if len(possibly_missing_list + missing_list) == 6: if crawler_num > 0: return 9, 'crawl_error', call_log, missing_list, possibly_missing_list return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list return 0, "success", call_log, missing_list, possibly_missing_list def deal_call_log(self, selectType, tel, query_month): call_log_url = 'http://hl.189.cn/service/cqd/queryDetailList.do?' # selectType 为1 表示历史详单, 为2表示当月详单 call_log_data = { 'isMobile': '0', 'seledType': '9', 'queryType': "", 'pageSize': '9999', 'pageNo': '1', 'flag': '', 'pflag': '', 'accountNum': tel + ':2000004', 'callType': '3', 'selectType': selectType, 'detailType': '9', 'selectedDate': query_month, 'method': 'queryCQDMain' } headers = { 'Referer': 'http://hl.189.cn/service/cqd/detailQueryCondition.do', } for retry in xrange(self.max_retry): code, key, resp = self.get(call_log_url, params=call_log_data, headers=headers) if code != 0: pass else: break else: return key, code, [], 'website' if u'没有查找到相关数据' in resp.text: self.log('crawler', '没有查找到相关数据', resp) return '', -1, '', '' else: try: call_month_log = self.call_log_get(resp.text, query_month) except: error = traceback.format_exc() self.log('crawler', 'html_error : %s' % error, resp) return "html_error", 9, [], 'crawler' return 'success', 0, call_month_log, '' def call_log_get(self, response, query_month): call_month_log = [] selector = etree.HTML(response) rows = selector.xpath('//*[@id="tb1"]//tr') for i, row in enumerate(rows): call_log = {} call_log['month'] = query_month # 费用 cost = row.xpath('.//script') if len(cost) <= 0: continue cost = cost[0] cost = cost.xpath('string(.)') call_cost = int(re.findall('var thMoney = new String\((\d+)\);', cost)[0]) # 转换单位(元) if call_cost % 100 == 0: call_cost = call_cost / 100 else: call_cost = round(call_cost / 100, 2) call_log['call_cost'] = str(call_cost) call_log['call_tel'] = row.xpath('.//td[5]/text()')[0] call_log['call_method'] = row.xpath('.//td[4]/text()')[0] call_log['call_type'] = row.xpath('.//td[7]/text()')[0] # call_log['call_from'] = row.xpath('.//td[3]/text()')[0] raw_call_from = row.xpath('.//td[3]/text()')[0].strip() call_from, error = self.formatarea(raw_call_from) if call_from: call_log['call_from'] = call_from else: # self.log("crawler", "{} {}".format(error, raw_call_from), "") call_log['call_from'] = raw_call_from call_duration = row.xpath('.//td[6]/text()')[0] time_list = call_duration.split(':') call_log['call_duration'] = str(int(time_list[0]) * 3600 + int(time_list[1]) * 60 + int(time_list[2])) call_log['call_to'] = '' call_time = row.xpath('./td[2]/text()')[0] timeArray = time.strptime(call_time, "%Y%m%d%H%M%S") call_time_timeStamp = str(int(time.mktime(timeArray))) call_log['call_time'] = call_time_timeStamp call_month_log.append(call_log) return call_month_log def crawl_phone_bill(self, **kwargs): """ bill_month string 201612 账单月份 bill_amount string 10.00 账单总额 bill_package string 10.00 套餐及固定费 bill_ext_calls string 10.00 套餐外语音通信费 bill_ext_data string 10.00 套餐外上网费 bill_ext_sms string 10.00 套餐外短信费 bill_zengzhifei string 10.00 增值业务费 bill_daishoufei string 10.00 代收业务费 bill_qita string 10.00 其他费用 """ month_fee = [] missing_list = [] today = date.today() crawler_num = 0 search_month = [x for x in 
range(0, -6, -1)] month_bill_url = 'http://hl.189.cn/service/billDateChoiceNew.do' for query_month in search_month: month_fee_data = {} query_date = today + relativedelta(months=query_month) query_month = "%d%02d" % (query_date.year, query_date.month) month_bill_data = { 'method': 'doSearch', 'selectDate': query_month } for retry in xrange(self.max_retry): code, key, resp = self.post(month_bill_url, data=month_bill_data) if code != 0: pass else: break else: missing_list.append(query_month) continue if u'对不起,系统忙,请稍后再试!' in resp.text: missing_list.append(query_month) self.log('website', u'官网繁忙或没有本月账单', resp) continue try: # with open('bill.py','w')as f: # f.write(resp.text) month_fee_data['bill_month'] = "%d%02d" % (query_date.year, query_date.month) bill_amount = re.findall(r'本期费用合计:(\d*\.?\d+\.\d+)', resp.text.encode('utf8')) if not bill_amount: missing_list.append(query_month) continue else: month_fee_data['bill_amount'] = bill_amount[0] bill_package = str(float(re.findall(r'基本月租费\s+</td>\s+<td class="td5">[\D]+(\d+\.\d+)', resp.text.encode('utf8'))[0])) # 改版前,2017-12-11 # bill_package = str(float(re.findall(r'基本月租费\s+</td>\s+<td class="td5">[\D]+(\d+\.\d+)', # resp.text.encode('utf8'))[0]) + # float(re.findall(r'手机上网月功能费业务可选包\s+</td>\s+<td class="td5">[\D]+(\d+\.\d+)', # resp.text.encode('utf8'))[0])) month_fee_data['bill_package'] = bill_package bill_ext_calls = re.findall(r'国内通话费\s+</td>\s+<td class="td5">\s+(\d+\.\d+)', resp.text.encode('utf8')) month_fee_data['bill_ext_calls'] = bill_ext_calls[0] if bill_ext_calls else '' bill_ext_data = re.findall(r'手机国内上网费\s+</td>\s+<td class="td5">\s+(\d+\.\d+)',resp.text.encode('utf8'))
"""Simple script to delete all forms with "PLACEHOLDER" as their transcription and translation value. """ import sys import json from old_client import OLDClient url = 'URL' username = 'USERNAME' password = 'PASSWORD' c = OLDClient(url) logged_in = c.login(username, password) if not logged_in: sys.exit('Could not log in') search = { "query": { "filter": ['and', [ ['Form', 'transcrip
tion', '=', 'PLACEHOLDER'], ['Form', 'translations', 'transcription', '=', 'PLACEHOLDER'] ]] } } empty_forms = c.search('forms', search) print 'Deleting %d forms.' % len(empty_forms) deleted_count = 0 for form in empty_forms: delete_path = 'forms/%d' % form['id'] resp = c.delete(delete_path) if (type(resp) is not dict) or resp['id'] != form['id']: print 'Failed to delete form %d' % form['id'] else: deleted_count += 1 print 'Deleted %d forms
.' % deleted_count
from .curry_spec import
CurrySpec, ArgValues from .arg_values_fulfill_curry_spec import arg_values_fulfill_curry_spec from .make_func_curry_spec import make_func_curry_spec from .remove_args_from_curry_spec import remove_args_from_c
urry_spec
import re from measures.periodicValues.PeriodicValues import PeriodicValues from measures.generic.GenericMeasure import GenericMeasure as GenericMeasure import measures.generic.Units as Units class Overhead(GenericMeasure): def __init__(self, period, simulationTime): GenericMeasure.__init__(self, '', period, simulationTime, Units.MESSAGE_OVERHEAD) self.__measures = [] self.__initializePattern = re.compile('INFO peer.BasicPeer - Peer ([0-9]+) initializing ([0-9]+\,[0-9]+).*?') self.__neighbors = 0 def addMeasure(self, measure): self.__measures.append(measure) def parseLine(self, line): m = self.__initializePattern.match(line) if m is not None:
self.__neighbors += 1 return for measure in self.__measures: measure.parseLine(line) def getValues(self): return PeriodicValues(0, self.getPeriod(), self.getSimulationTime()) def getTotalValue(self): total = 0 for measure in self.__measures: total += measure.getTotalValue() return tot
al / float(self.__neighbors) / self.getSimulationTime()
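# Illustrative log line (hypothetical peer id and timestamp) that the
# initialize pattern above would match; such lines are only used here to
# count the number of neighbors:
#
#   INFO peer.BasicPeer - Peer 12 initializing 0,25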
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com> # # This file is part of nbxmpp. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; If not, see <http://www.gnu.org/licenses/>. from nbxmpp.namespaces import Namespace from nbxmpp.protocol import NodeProcessed from nbxmpp.structs import StanzaHandler from nbxmpp.task import iq_request_task from nbxmpp.errors import MalformedStanzaError from nbxmpp.modules.base import BaseModule from nbxmpp.modules.util import raise_if_error from nbxmpp.modules.bookmarks.util import parse_bookmarks from nbxmpp.modules.bookmarks.util import build_storage_node BOOKMARK_OPTIONS = { 'pubsub#persist_items': 'true', 'pubsub#access_model': 'whitelist', } class PEPBookmarks(BaseModule): _depends =
{ 'publish': 'PubSub', 'request_items': 'PubSub', } def __init__(self, client): BaseModule.__init__(self, client) self._client = clie
nt self.handlers = [ StanzaHandler(name='message', callback=self._process_pubsub_bookmarks, ns=Namespace.PUBSUB_EVENT, priority=16), ] def _process_pubsub_bookmarks(self, _client, stanza, properties): if not properties.is_pubsub_event: return if properties.pubsub_event.node != Namespace.BOOKMARKS: return item = properties.pubsub_event.item if item is None: # Retract, Deleted or Purged return try: bookmarks = parse_bookmarks(item, self._log) except MalformedStanzaError as error: self._log.warning(error) self._log.warning(stanza) raise NodeProcessed if not bookmarks: self._log.info('Bookmarks removed') return pubsub_event = properties.pubsub_event._replace(data=bookmarks) self._log.info('Received bookmarks from: %s', properties.jid) for bookmark in bookmarks: self._log.info(bookmark) properties.pubsub_event = pubsub_event @iq_request_task def request_bookmarks(self): _task = yield items = yield self.request_items(Namespace.BOOKMARKS, max_items=1) raise_if_error(items) if not items: yield [] bookmarks = parse_bookmarks(items[0], self._log) for bookmark in bookmarks: self._log.info(bookmark) yield bookmarks @iq_request_task def store_bookmarks(self, bookmarks): _task = yield self._log.info('Store Bookmarks') self.publish(Namespace.BOOKMARKS, build_storage_node(bookmarks), id_='current', options=BOOKMARK_OPTIONS, force_node_options=True)
# -*- coding: utf-8 -*- from __future__ import unicode_literals import eventlet eventlet.monkey_patch() import re import sys import errno import logging from settings import LOG_NAME class BaseAuthConfig(object): """ Read auth config and store it. Tries to be a singleton. """ def __init__(self): self._configs = {} @staticmethod def _read_config(cfg_file): """ Read OS auth config file cfg_fil
e -- the path to config file """ auth_conf_errors = { 'OS_TENANT_NAME': 'Missing tenant name.', 'OS_USERNAME': 'Missing username.', 'OS_PASSWORD': 'Missing password.', 'OS_AUTH_URL': 'Missing API url.', } rv = {} stripchars = " \'\"" LOG = logging.getLogger(LOG_NAME)
try: with open(cfg_file) as f: for line in f: rg = re.match(r'\s*export\s+(\w+)\s*=\s*(.*)', line) if rg: rv[rg.group(1).strip(stripchars)] = \ rg.group(2).strip(stripchars) except IOError: LOG.error("Can't open file '{path}'".format(path=cfg_file)) sys.exit(errno.ENOENT) # error detection exit_msg = [] for i, e in auth_conf_errors.iteritems(): if rv.get(i) is None: exit_msg.append(e) if len(exit_msg) > 0: for msg in exit_msg: LOG.error("AUTH-config error: '{msg}'".format(msg=msg)) sys.exit(errno.EPROTO) return rv def read(self, cfg_filename='/root/openrc'): """ Read or get from cache OS auth config file Args: cfg_filename (str) -- the path to config file Returns: Dict of auth params. Raises: IOError: if file can't readable or not wound. """ rv = self._configs.get(cfg_filename) if rv: return rv rv = self._read_config(cfg_filename) self._configs[cfg_filename] = rv return self._configs.get(cfg_filename) AuthConfig = BaseAuthConfig() # vim: tabstop=4 shiftwidth=4 softtabstop=4
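# A small illustrative sketch (not part of the original module). The parser
# above expects an OpenStack "openrc"-style file of export statements, for
# example a file containing:
#
#   export OS_TENANT_NAME="admin"
#   export OS_USERNAME="admin"
#   export OS_PASSWORD="secret"
#   export OS_AUTH_URL="http://controller:5000/v2.0"
#
# Reading it through the module-level singleton (using the default path from
# read()):
#
#   creds = AuthConfig.read('/root/openrc')
#   # creds['OS_USERNAME'] == 'admin'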
ve self.is_active = self.is_time_valid(now) # If we got a change, log it! if self.is_active != was_active: _from = 0 _to = 0 # If it's the start, get a special value for was if was_active is None: _from = -1 if was_active: _from = 1 if self.is_active: _to = 1 # Now raise the log naglog_result('info', 'TIMEPERIOD TRANSITION: %s;%d;%d' % (self.get_name(), _from, _to)) # clean the get_next_valid_time_from_t cache # The entries are a dict on t. t < now are useless # Because we do not care about past anymore. # If not, it's not important, it's just a cache after all :) def clean_cache(self): now = int(time.time()) t_to_del = [] for t in self.cache: if t < now: t_to_del.append(t) for t in t_to_del: del self.cache[t] # same for the invalid cache t_to_del = [] for t in self.invalid_cache: if t < now: t_to_del.append(t) for t in t_to_del: del self.invalid_cache[t] def get_next_valid_time_from_t(self, t): # first find from cache t = int(t) original_t = t #logger.debug("[%s] Check valid time for %s" % ( self.get_name(), time.asctime(time.localtime(t))) res_from_cache = self.find_next_valid_time_from_cache(t) if res_from_cache is not None: return res_from_cache still_loop = True # Loop for all minutes... while still_loop: local_min = None # Ok, not in cache... dr_mins = [] s_dr_mins = [] for dr in self.dateranges: dr_mins.append(dr.get_next_valid_time_from_t(t)) s_dr_mins = sorted([d for d in dr_mins if d is not None]) for t1 in s_dr_mins: if not self.exclude and still_loop is True: # No Exclude so we are good local_min = t1 still_loop = False else: for tp in self.exclude: if not tp.is_time_valid(t1) and still_loop is True: # OK we found a date that is not valid in any exclude timeperiod local_min = t1 still_loop = False if local_min is None: # print "Looking for next valid date" exc_mins = [] if s_dr_mins != []: for tp in self.exclude: exc_mins.append(tp.get_next_invalid_time_from_t(s_dr_mins[0])) s_exc_mins = sorted([d for d in exc_mins if d is not None]) if s_exc_mins != []: local_min = s_exc_mins[0] if local_min is None: still_loop = False else: t = local_min # No loop more than one year if t > original_t + 3600*24*366 + 1: still_loop = False local_min = None # Ok, we update the cache... self.cache[original_t] = local_min return local_min def get_next_invalid_time_from_t(self, t): #print '\n\n', self.get_name(), 'Search for next invalid from', time.asctime(time.localtime(t)), t t = int(t) original_t = t still_loop = True # First try to find in cache res_from_cache = self.find_next_invalid_time_from_cache(t) if res_from_cache is not None: return res_from_cache # Then look, maybe t is already invalid if not self.is_time_valid(t): return t local_min = t res = None # Loop for all minutes... while still_loop: #print "Invalid loop with", time.asctime(time.localtime(local_min)) dr_mins = [] #val_valids = [] #val_inval = [] # But maybe we can find a better solution with next invalid of standard dateranges #print self.get_name(), "After valid of exclude, local_min =", time.asctime(time.localtime(local_min)) for dr in self.dateranges: #print self.get_name(), "Search a next invalid from DR", time.asctime(time.localtime(local_min)) #print dr.__dict__ m = dr.get_next_invalid_time_from_t(local_min) #print self.get_name(), "Dr", dr.__dict__, "give me next invalid", time.asctime(time.localtime(m)) if m is not None: # But maybe it's invalid for this dr, but valid for other ones. 
#if not self.is_time_valid(m): # print "Final: Got a next invalid at", time.asctime(time.localtime(m)) dr_mins.append(m) #if not self.is_time_valid(m): # val_inval.append(m) #else: # val_valids.append(m) # print "Add a m", time.asctime(time.localtime(m)) #else: # print dr.__dict__ # print "FUCK bad result\n\n\n" #print "Inval" #for v in val_inval: # print "\t", time.asctime(time.localtime(v)) #print "Valid" #for v in val_valids: # print "\t", time.asctime(time.localtime(v))
if dr_mins != []: local_min = min(dr_mins) # Take the minimum valid as lower for next search #local_min_valid = 0 #if val_valids != []: # local_min_valid = min(val_valids) #if local_min_valid != 0: # local_min = local_min_valid #else: #
local_min = min(dr_mins) #print "UPDATE After dr: found invalid local min:", time.asctime(time.localtime(local_min)), "is valid", self.is_time_valid(local_min) #print self.get_name(), 'Invalid: local min', local_min #time.asctime(time.localtime(local_min)) # We do not loop unless the local_min is not valid if not self.is_time_valid(local_min): still_loop = False else: # continue until we reach too far..., in one minute # After one month, go quicker... if local_min > original_t + 3600*24*30: local_min += 3600 else: # else search for 1min precision local_min += 60 # after one year, stop. if local_min > original_t + 3600*24*366 + 1: # 60*24*366 + 1: still_loop = False #print "Loop?", still_loop # if we've got a real value, we check it with the exclude if local_min is not None: # Now check if local_min is not valid for tp in self.exclude: #print self.get_name(),"we check for invalid", time.asctime(time.localtime(local_min)), 'with tp', tp.name if tp.is_time_valid(local_min): still_loop = True # local_min + 60 local_min = tp.get_next_invalid_time_from_t(local_min+60) # No loop more than one year if local_min > original_t + 60*24*366 + 1: still_loop = False res = None if not still_loop: # We find a possible value # We take the result the minimal possible if res is None or local_min < res: res = local_min #print "Finished Return the next invalid", time.asctime(time.localtime(local_min)) # Ok, we update the cache... self.invalid_cache[original_t] = local_min return local_min def has(self, prop): return h
from dialtone.blue
prints.message.views import bp as message # n
oqa
tures[ self._structures.keys()[ int(items[6])]].get_value( di, surn=items[8], param=items[10]))) else: res.append((path, self._devices[ self._devices.keys()[ int(items[6])]].get_value( di, surn=items[8], param=items[10]))) # res.append((items[-1], pl)) return xmllist.format( pt='ParameterValue', val='\n'.join([parameter.format(resp=r) for r in res])) def r_set_values(self): pass def r_get_attributes(self): pass def r_get_configuration_update(self): pass def r_get_current_configuration_version(self): pass def r_get_supported_data_models_update(self): pass def r_get_supported_parameters_update(self): pass def r_get_alarms_enabled(self): pass class NodeCollection(object): def __init__(self, struct, devicelist): self.collection = struct self.devices = devicelist def get(self, var, path): print('get') def get_device(cat): devices = {'thermostats': Thermostat, 'smoke_co_alarms': SmokeAlarm} if cat in devices: return devices[cat] else: print('unknown device type: %s' % cat) def dictdiffupdate(old, new): # print('2') # print(new.keys()) diff = {} for k, v in new.iteritems(): if k not in old: # print('not: %s' % k) diff.update({k: v}) elif isinstance(v, dict): # print('dict') d = dictdiffupdate(old[k], v) if len(d) > 0: diff.update({k: d}) else: # print('basic') if v != old[k]: diff.update({k: v}) return diff if __name__ == '__main__': from twisted.internet import reactor d = { 'path': '/', 'data': { 'structures': { 'fwo7ooZml1BE5o_zUEVOOAmD6p4_K' + 'Kcf5h-hyF9S9gGD8gz61GVajg': { 'name': 'Chave', 'away': 'home',
'time_zone': 'Europe/Paris', 'smoke_co_alarms': ['kpv19WDjBwPi-fbhzZ5CpbuE3_EunExt'], 'postal_code': '13005', 'thermostats': ['o4WARbb6TBa0Z81uC9faoLuE3_EunExt',
'o4WARbb6TBZmNT32aMeJ8ruE3_EunExt'], 'country_code': 'FR', 'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4_KKcf5h-hy' + 'F9S9gGD8gz61GVajg', 'wheres': { 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBasUHCKLxFAZnU3k8GF90g': { 'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBasUHC' + 'KLxFAZnU3k8GF90g', 'name': 'Entryway'}}}}, 'devices': { 'thermostats': { 'o4WARbb6TBa0Z81uC9faoLuE3_EunExt': { 'locale': 'fr-CA', 'hvac_state': 'cooling', 'away_temperature_high_c': 24.0, 'humidity': 50, 'away_temperature_high_f': 76, 'away_temperature_low_f': 55, 'temperature_scale': 'C', 'away_temperature_low_c': 12.5, 'can_heat': True, 'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkR' + 'cBasUHCKLxFAg782GQma1gw', 'software_version': '4.1', 'ambient_temperature_c': 27.0, 'has_fan': True, 'ambient_temperature_f': 81, 'is_online': True, 'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4' + '_KKcf5h-hyF9S9gGD8gz61GVajg', 'device_id': 'o4WARbb6TBa0Z81uC9faoLuE3_EunExt', 'target_temperature_c': 21.0, 'name': 'Living Room (5DC5)', 'can_cool': True, 'target_temperature_f': 70, 'fan_timer_active': False, 'is_using_emergency_heat': False, 'target_temperature_low_c': 19.0, 'target_temperature_low_f': 66, 'hvac_mode': 'heat', 'target_temperature_high_f': 79, 'name_long': 'Living Room Thermostat (5DC5)', 'target_temperature_high_c': 26.0, 'has_leaf': True}}}}} dict_two = { 'path': '/', 'data': { 'structures': { 'fwo7ooZml1BE5o_zUEVOOAmD6p4_K' + 'Kcf5h-hyF9S9gGD8gz61GVajg': { 'name': 'Chave', 'away': 'home', 'time_zone': 'Europe/Paris', 'smoke_co_alarms': [ 'kpv19WDjBwPi-fbhzZ5CpbuE3_EunExt' ], 'postal_code': '13006', 'thermostats': ['o4WARbb6TBa0Z81uC9faoLuE3_EunExt', 'o4WARbb6TBZmNT32aMeJ8ruE3_EunExt'], 'country_code': 'FR', 'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4_KKcf5h' + '-hyF9S9gGD8gz61GVajg', 'wheres': { 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBasUHCKLxFAZnU' + '3k8GF90g': { 'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcB' + 'asUHCKLxFAZnU3k8GF90g', 'name': 'Entryway'}}}}, 'devices': { 'thermostats': { 'o4WARbb6TBa0Z81uC9faoLuE3_EunExt': { 'locale': 'fr-CA', 'hvac_state': 'cooling', 'away_temperature_high_c': 24.0, 'humidity': 50, 'away_temperature_high_f': 76, 'away_temperature_low_f': 55, 'temperature_scale': 'C', 'away_temperature_low_c': 12.5, 'can_heat': True, 'where_id': 'UDex0umsLcPn9ADdpOYzBnIjWcVYlkRcBa' + 'sUHCKLxFAg782GQma1gw', 'software_version': '4.1', 'ambient_temperature_c': 29.0, 'has_fan': True, 'ambient_temperature_f': 81, 'is_online': True, 'structure_id': 'fwo7ooZml1BE5o_zUEVOOAmD6p4_K' + 'Kcf5h-hyF9S9gGD8gz61GVajg', 'device_id': 'o4WARbb6TBa0Z81uC9faoLuE3_EunExt', 'target_temperature_c': 21.0, 'name': 'Living Room (5DC5)', 'can_cool': True, 'target_temperature_f': 70, 'fan_timer_active': False, 'is_using_emergency_heat': False, 'target_temperature_low_c': 19.0, 'target_temperature_low_f': 66, 'hvac_mode': 'heat', 'target_temperature_high_f': 79, 'name_long': 'Living Room Thermostat (5DC5)', 't
import traceback from flask import (
Blueprint, current_app, jsonify, render_template, ) bp = Blueprint('patilloid', __name__) @bp.route('/', methods=('GET',)) def index(): try: current_app.logger.info("Let's show them Patilloid!") return render_template('patilloid.html') except Exception as err: current_app.logger.error(err) cur
rent_app.logger.error(traceback.format_exc()) return ( jsonify({"error": "Sorry, something bad happened with your request."}), 400, )
#!/usr/bin/env python ''' Using Arista's pyeapi, create a script that allows you to add a VLAN (both the VLAN ID and the VLAN name). Your script should first check that the VLAN ID is available and only add the VLAN if it doesn't already exist. Use VLAN IDs between 100 and 999. You should be able to call the script from the command line as follows: python eapi_vlan.py --name blue 100 # add VLAN100, name blue If you call the script with the --remove option, the VLAN will be removed. python eapi_vlan.py --remove 100 # remove VLAN100 Once again only remove the VLAN if it exists on the switch. You will probably want to use Python's argparse to accomplish the argument processing. ''' import pyeapi import argparse def pyeapi_result(output): ''' Return the 'result' value from the pyeapi output ''' return output[0]['result'] def check_vlan_exists(eapi_conn, vlan_id): ''' Check if the given VLAN exists Return either vlan_name or False ''' vlan_id = str(vlan_id) cmd = 'show vlan id {}'.format(vlan_id) try: response = eapi_conn.enable(cmd) check_vlan = pyeapi_result(response)['vlans'] return check_vlan[vlan_id]['name'] except (pyeapi.eapilib.CommandError, KeyError): pass return False def configure_vlan(eapi_conn, vlan_id, vlan_name=None): ''' Add the given vlan_id to the switch Set the vlan_name (if provided) Note, if the vlan already exists, then this will just set the vlan_name ''' command_str1 = 'vlan {}'.format(vlan_id) cmd = [command_str1] if vlan_name is not None: command_str2 = 'name {}'.format(vlan_name) cmd.append(command_str2) return eapi_conn.config(cmd) def main(): ''' Add/remove vlans from Arista switch in an idempotent manner ''' ea
pi_conn = pyeapi.connect_to("pynet-sw2") # Argument parsing parser = argparse.ArgumentParser( description="Idempotent addition/removal of VLAN to Arista switch" ) parser.add_argument("vlan_id", help="VLAN number to create or remove", action="store", type=int) parser.add_argument( "--name", help="Specify VLAN name", action="store", dest="vlan_name",
type=str ) parser.add_argument("--remove", help="Remove the given VLAN ID", action="store_true") cli_args = parser.parse_args() vlan_id = cli_args.vlan_id remove = cli_args.remove vlan_name = cli_args.vlan_name # Check if VLAN already exists check_vlan = check_vlan_exists(eapi_conn, vlan_id) # check if action is remove or add if remove: if check_vlan: print "VLAN exists, removing it" command_str = 'no vlan {}'.format(vlan_id) eapi_conn.config([command_str]) else: print "VLAN does not exist, no action required" else: if check_vlan: if vlan_name is not None and check_vlan != vlan_name: print "VLAN already exists, setting VLAN name" configure_vlan(eapi_conn, vlan_id, vlan_name) else: print "VLAN already exists, no action required" else: print "Adding VLAN including vlan_name (if present)" configure_vlan(eapi_conn, vlan_id, vlan_name) if __name__ == "__main__": main()
#!/usr/bin/env python # -*-
coding: utf-8 -*- """ In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up. Since in the previous quiz you made a decision on which value to keep for the "areaLand" field, you now know what has to be done. Finish the function fix_area(). It will receive a string as an input, and it has to return a float representing the value of the area or None. You have to change the functio
n fix_area. You can use extra functions if you like, but changes to process_file will not be taken into account. The rest of the code is just an example on how this function can be used. """ import codecs import csv import json import pprint CITIES = 'cities.csv' def fix_area(area): # YOUR CODE HERE if area == 'NULL': return None elif area.startswith('{'): area = area.replace('{', '') if area.endswith('}'): area = area.replace('}', '') dataList = area.split('|') retArea = '' for data in dataList: if len(data) > len(retArea): retArea = str(data) return float(retArea) else: return float(area) global_name = ['areaLand', 'name', 'areaMetro', 'populationTotal', 'postalCode'] def process_file(filename, key): # CHANGES TO THIS FUNCTION WILL BE IGNORED WHEN YOU SUBMIT THE EXERCISE data = [] with open(filename, "r") as f: reader = csv.DictReader(f) #skipping the extra matadata for i in range(3): l = reader.next() # processing file for line in reader: # calling your function to fix the area value if key in line: line[key] = fix_area(line[key]) data.append(line) return data def test(): nameNum = 0 data = process_file(CITIES, global_name[nameNum]) print "Printing three example results:" for n in range(5,8): pprint.pprint(data[n][global_name[nameNum]]) #assert data[8][global_name[1]] == 55166700.0 #assert data[3][global_name[1]] == None if __name__ == "__main__": test()
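# Illustrative calls (not part of the original exercise) showing what fix_area
# returns for the value shapes handled above; the numbers are made up:
#
#   fix_area('NULL')            -> None
#   fix_area('101.78')          -> 101.78
#   fix_area('{1.23|45.678}')   -> 45.678   (the longest of the '|'-separated
#                                            values is kept, per the code above)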
umn_name] = Content((self._Nmax - self._Nmin + 1, self._len_testing_set)) self._columns_not_implemented[column_name] = None # will be set to a bool self._rows_not_implemented[column_name] = { n: None for n in range(self._Nmax - self._Nmin + 1)} # will be set to a bool if group_name not in self._groups: self._groups[group_name] = list() self._group_names_sorted.append(group_name) # preserve the ordering provided by the user self._groups[group_name].append(column_name) if isinstance(operations, str): self._columns_operations[column_name] = (operations,) elif isinstance(operations, tuple): self._columns_operations[column_name] = operations else: raise ValueError("Invalid operation in PerformanceTable") @classmethod def suppress_group(cls, group_name): cls._suppressed_groups.append(group_name) @classmethod def clear_suppressed_groups(cls): cls._suppressed_groups = list() @classmethod def preprocess_setitem(cls, group_name, function): cls._preprocessor_setitem[group_name] = function @classmethod def clear_setitem_preprocessing(cls): cls._preprocessor_setitem.clear() def __getitem__(self, args): assert len(args) == 3 column_name = args[0] N = args[1] mu_index = args[2] assert self._columns_not_implemented[column_name] in (True, False) assert self._rows_not_implemented[column_name][N - self._Nmin] in (True, False) if (not self._columns_not_implemented[column_name] and not self._rows_not_implemented[column_name][N - self._Nmin]): return self._columns[column_name][N - self._Nmin, mu_index] else: return CustomNotImplementedAfterDiv def __setitem__(self, args, value): assert len(args) == 3 column_name = args[0] N = args[1] mu_index = args[2] if is_not_implemented(value): assert self._columns_not_implemented[column_name] in (None, True, False) if self._columns_not_implemented[column_name] is None: self._columns_not_implemented[column_name] = True assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, True) if self._rows_not_implemented[column_name][N - self._Nmin] is None: self._rows_not_implemented[column_name][N - self._Nmin] = True else: assert self._columns_not_implemented[column_name] in (None, True, False) if self._columns_not_implemented[column_name] in (None, True): self._columns_not_implemented[column_name] = False assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, False) if self._rows_not_implemented[column_name][N - self._Nmin] is None: self._rows_not_implemented[column_name][N - self._Nmin] = False if column_name not in self._preprocessor_setitem: self._columns[column_name][N - self._Nmin, mu_index] = value else: self._columns[column_name][N - self._Nmin, mu_index] = self._preprocessor_setitem[column_name](value) def _process(self): groups_content = collections.OrderedDict() for group in self._group_names_sorted: # Skip suppresed groups if group in self._suppressed_groups: continue # Populate all columns columns = list() for column in self._groups[group]: assert self._columns_not_implemented[column] in (True, False) if self._columns_not_implemented[column] is False: columns.append(column) if l
en(columns) == 0: continue # Storage for print table_index = list() # of strings table_header = dict() # from string to string table_content = dict() # from string to Content array column_size = dict() # from string to int # First column should be the reduced space dimension table_index.append("N") table_header["N"] = "N" table_content["N"] = list(range(self._Nmin, self._N
max + 1)) column_size["N"] = max([max([len(str(x)) for x in table_content["N"]]), len("N")]) # Then fill in with postprocessed data for column in columns: for operation in self._columns_operations[column]: # Set header if operation in ("min", "max"): current_table_header = operation + "(" + column + ")" current_table_index = operation + "_" + column elif operation == "mean": current_table_header = "gmean(" + column + ")" current_table_index = "gmean_" + column else: raise ValueError("Invalid operation in PerformanceTable") table_index.append(current_table_index) table_header[current_table_index] = current_table_header # Compute the required operation of each column over the second index (testing set) table_content[current_table_index] = Content((self._Nmax - self._Nmin + 1,)) for n in range(self._Nmin, self._Nmax + 1): assert self._rows_not_implemented[column][n - self._Nmin] in (None, True, False) if self._rows_not_implemented[column][n - self._Nmin] is False: if operation == "min": current_table_content = min(self._columns[column][n - self._Nmin, :]) elif operation == "mean": data = self._columns[column][n - self._Nmin, :] if not data.any(): # all zeros current_table_content = 0. else: data[data == 0.] = sys.float_info.epsilon current_table_content = exp(mean(log(data))) elif operation == "max": current_table_content = max(self._columns[column][n - self._Nmin, :]) else: raise ValueError("Invalid operation in PerformanceTable") table_content[current_table_index][n - self._Nmin] = current_table_content else: table_content[current_table_index][n - self._Nmin] = nan # Get the width of the columns column_size[current_table_index] = max([max([ len(str(x)) for x in table_content[current_table_index]]), len(current_table_header)]) # Save content assert group not in groups_content groups_content[group] = (table_index, table_header, table_content, column_size) return groups_content def __str__(self): groups_content = self._process() output = "" for (group, (table_index, table_header, table_content, column_size)) in groups_content.items(): table_index_without_N = table_index[1:] # Prepare formatter for string conversion formatter = "" for (column_index, column_name) in enumerate(table_index): formatter += "{" + str(column_index) + ":<{" + column_name + "}}" if column_index < len(table_index) - 1: formatter += "\t" # Print the header current_line = list() for t in table_index: current_line.append(table_header[t]) output += formatter.format(*current_line, **column_size) + "\n" # Print the current row, only if its content was set to NotImplemented for n in range(self._Nmin, self._Nmax + 1):
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2019, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: kubevirt_rs short_description: Manage KubeVirt virtual machine replica sets description: - Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets. version_added: "2.8" author: KubeVirt Team (@kubevirt) options: state: description: - Create or delete virtual machine replica sets. default: "present" choices: - present - absent type: str name: description: - Name of the virtual machine replica set. required: true type: str namespace: description: - Namespace where the virtual machine replica set exists. required: true type: str selector: description: - "Selector is a label query over a set of virtual machine." required: true type: dict replicas: description: - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. - Replicas defaults to 1 if newly created replica set. type: int extends_documentation_fragment: - k8s_auth_options - kubevirt_vm_options - kubevirt_common_options requirements: - python >= 2.7 - openshift >= 0.8.2 ''' EXAMPLES = ''' - name: Create virtual machine replica set 'myvmir' kubevirt_rs: state: presnet name: myvmir namespace: vms wait: true replicas: 3 memory: 64M labels: myvmi: myvmi selector: matchLabels: myvmi: myvmi disks: - name: containerdisk volume: containerDisk: image: kubevirt/cirros-container-disk-demo:latest path: /custom-disk/cirros.img disk: bus: virtio - name: Remove virtual machine replica set 'myvmir' kubevirt_rs: state: absent name: myvmir namespace: vms wait: true ''' RETURN = ''' kubevirt_rs: description: - The virtual machine virtual machine replica set managed by the user. - "This dictionary contains all values returned by the KubeVirt API all options are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)" returned: success type: complex contains: {} ''' import copy import traceback from ansible.module_utils.k8s.common import AUTH_ARG_SPEC try: from openshift.dynamic.client import ResourceInstance except ImportError: # Handled in module_utils pass from ansible.module_utils.kubevirt import ( virtdict, KubeVirtRawModule, VM_COMMON_ARG_SPEC, ) KIND = 'VirtualMachineInstanceReplicaSet' VMIR_ARG_SPEC = { 'replicas': {'type': 'int'}, 'selector': {'type': 'dict'}, } class KubeVirtVMIRS(KubeVirtRawModule): @property def argspec(self): """ argspec property builder """ argument_spec = copy.deepcopy(AUTH_ARG_SPEC) argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC)) argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC)) return argument_spec def _read_stream(self, resource, watcher, stream, name, replicas): """ Wait for ready_replicas to equal the requested number of replicas. 
""" if self.params.get('state') == 'absent': # TODO: Wait for absent return return_obj = None for event in stream: if event.get('object'): obj = ResourceInstance(resource, event['object']) if obj.metadata.name == name and hasattr(obj, 'status'): if replicas == 0: if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas: return_obj = obj watcher.stop() break if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas: return_obj = obj watcher.stop() break if not return_obj: self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.") if replicas and return_obj.status.readyReplicas is None: self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.") if replicas and return_obj.status.readyReplicas != replicas: self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within " "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas)) return return_obj.to_dict() def wait_for_replicas(self): namespace = self.params.get('namespace') wait_timeout = self.params.get('wait_timeout') replicas = self.params.get('replicas') name = self.name resource = self.find_supported_resource(KIND) w, stream = self._create_stream(resource, namespace, wait_timeout) return self._read_stream(resource, w, stream, name, replicas) def execute_module(self): # Parse parameters specific for this module: definition = virtdict()
selector = self.params.get('selector') replicas = self.params.get('replicas') if selector: definition['spec']['selector'] = selector
if replicas is not None: definition['spec']['replicas'] = replicas # Execute the CRUD of VM: template = definition['spec']['template'] dummy, definition = self.construct_vm_definition(KIND, definition, template) result_crud = self.execute_crud(KIND, definition) changed = result_crud['changed'] result = result_crud.pop('result') # Wait for the replicas: wait = self.params.get('wait') if wait: result = self.wait_for_replicas() # Return from the module: self.exit_json(**{ 'changed': changed, 'kubevirt_rs': result, 'result': result_crud, }) def main(): module = KubeVirtVMIRS() try: module.execute_module() except Exception as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
# -*- coding: utf-8 -*- # Generated
by Django 1.11.2 on 2017-08-27 23:09 from __future__ import unicode_li
terals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("studies", "0024_merge_20170823_1352"), ("studies", "0029_auto_20170825_1505"), ] operations = []
#!/usr/bin/python import usb.core import usb.util import serial import socket from escpos import * from constants import * from exceptions import * from time import sleep class Usb(Escpos): """ Define USB printer """ def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01): """ @param idVendor : Vendor ID @param idProduct : Product ID @param interface : USB device interface @param in_ep : Input end point @param out_ep : Output end point """ self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT self.idVendor = idVendor self.idProduct = idProduct self.interface = interface self.in_ep = in_ep self.out_ep = out_ep self.open() def open(self): """ Search device on USB tree and set is as escpos device """ self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct) if self.device is None: raise NoDeviceError() try: if self.device.is_kernel_driver_active(self.interface): self.device.detach_kernel_driver(self.interface) self.device.set_configuration() usb.util.claim_interface(self.device, self.interface) except usb.core.USBError as e: raise HandleDeviceError(e) def close(self): i = 0 while True: try: if not self.device.is_kernel_driver_active(self.interface): usb.util.release_interface(self.device, self.interface) self.device.attach_kernel_driver(self.interface) usb.util.dispose_resources(self.device) else:
self.device = None return True except usb.core.USBError as e: i += 1 if i > 10: return False sleep(0.1) def _raw(self, msg): """ Print any command sent in raw format """ if len(msg) != self.device.write(self.out_ep, msg, self.interfa
ce): self.device.write(self.out_ep, self.errorText, self.interface) raise TicketNotPrinted() def __extract_status(self): maxiterate = 0 rep = None while rep == None: maxiterate += 1 if maxiterate > 10000: raise NoStatusError() r = self.device.read(self.in_ep, 20, self.interface).tolist() while len(r): rep = r.pop() return rep def get_printer_status(self): status = { 'printer': {}, 'offline': {}, 'error' : {}, 'paper' : {}, } self.device.write(self.out_ep, DLE_EOT_PRINTER, self.interface) printer = self.__extract_status() self.device.write(self.out_ep, DLE_EOT_OFFLINE, self.interface) offline = self.__extract_status() self.device.write(self.out_ep, DLE_EOT_ERROR, self.interface) error = self.__extract_status() self.device.write(self.out_ep, DLE_EOT_PAPER, self.interface) paper = self.__extract_status() status['printer']['status_code'] = printer status['printer']['status_error'] = not ((printer & 147) == 18) status['printer']['online'] = not bool(printer & 8) status['printer']['recovery'] = bool(printer & 32) status['printer']['paper_feed_on'] = bool(printer & 64) status['printer']['drawer_pin_high'] = bool(printer & 4) status['offline']['status_code'] = offline status['offline']['status_error'] = not ((offline & 147) == 18) status['offline']['cover_open'] = bool(offline & 4) status['offline']['paper_feed_on'] = bool(offline & 8) status['offline']['paper'] = not bool(offline & 32) status['offline']['error'] = bool(offline & 64) status['error']['status_code'] = error status['error']['status_error'] = not ((error & 147) == 18) status['error']['recoverable'] = bool(error & 4) status['error']['autocutter'] = bool(error & 8) status['error']['unrecoverable'] = bool(error & 32) status['error']['auto_recoverable'] = not bool(error & 64) status['paper']['status_code'] = paper status['paper']['status_error'] = not ((paper & 147) == 18) status['paper']['near_end'] = bool(paper & 12) status['paper']['present'] = not bool(paper & 96) return status def __del__(self): """ Release USB interface """ if self.device: self.close() self.device = None class Serial(Escpos): """ Define Serial printer """ def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1): """ @param devfile : Device file under dev filesystem @param baudrate : Baud rate for serial transmission @param bytesize : Serial buffer size @param timeout : Read/Write timeout """ self.devfile = devfile self.baudrate = baudrate self.bytesize = bytesize self.timeout = timeout self.open() def open(self): """ Setup serial port and set is as escpos device """ self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True) if self.device is not None: print "Serial printer enabled" else: print "Unable to open serial printer on: %s" % self.devfile def _raw(self, msg): """ Print any command sent in raw format """ self.device.write(msg) def __del__(self): """ Close Serial interface """ if self.device is not None: self.device.close() class Network(Escpos): """ Define Network printer """ def __init__(self,host,port=9100): """ @param host : Printer's hostname or IP address @param port : Port to write to """ self.host = host self.port = port self.open() def open(self): """ Open TCP socket and set it as escpos device """ self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.device.connect((self.host, self.port)) if self.device is None: print "Could not open socket for %s" % self.host def _raw(self, msg): 
self.device.send(msg) def __del__(self): """ Close TCP connection """ self.device.close()
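# A hedged usage sketch (not part of the original module); the USB ids and the
# host below are placeholders, not values taken from the code above:
#
#   printer = Usb(0x04b8, 0x0202)         # USB printer by vendor/product id
#   # printer = Serial('/dev/ttyS0')      # or a serial printer
#   # printer = Network('192.168.1.100')  # or a network printer on port 9100
#   printer._raw('Hello, world\n')        # _raw() sends bytes straight through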
#!/usr/bin/env python # a script to delete the contents of S3 buckets # import the sys and boto3 modules import sys import boto3 # create an s3 resource s3 = boto3.resource('s3') # iterate over the script arguments as bucket names for bucket_name in sys.argv[1:]: # use the bucket name to create a bucket object bucket = s3.Bucket(bucket_name) # delete the bucket's conte
nts and print the response or error
for key in bucket.objects.all(): try: response = key.delete() print response except Exception as error: print error
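# Example invocation (the script filename and bucket names are hypothetical),
# assuming the default boto3 credentials can delete objects in those buckets:
#
#   python delete_bucket_contents.py my-first-bucket my-second-bucket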
#!/usr/bin
/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Symplicity.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv
)
#!/usr/bin/env python # # Wrapper scr
ipt for Java Conda packages that ensures that the java runtime # is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128). # # # Program Parameters # import os import sys import subprocess from os import access, getenv, X_OK jar_file = 'hops0.31.jar' default_jvm_mem_opts = ['-Xms1g', '-Xmx2g'] # !!! End of parameter section. No use
r-serviceable code below this line !!! def real_dirname(path): """Return the symlink-resolved, canonicalized directory-portion of path.""" return os.path.dirname(os.path.realpath(path)) def java_executable(): """Return the executable name of the Java interpreter.""" java_home = getenv('JAVA_HOME') java_bin = os.path.join('bin', 'java') if java_home and access(os.path.join(java_home, java_bin), X_OK): return os.path.join(java_home, java_bin) else: return 'java' def jvm_opts(argv): """Construct list of Java arguments based on our argument list. The argument list passed in argv must not include the script name. The return value is a 3-tuple lists of strings of the form: (memory_options, prop_options, passthrough_options) """ mem_opts = [] prop_opts = [] pass_args = [] for arg in argv: if arg.startswith('-D'): prop_opts.append(arg) elif arg.startswith('-XX'): prop_opts.append(arg) elif arg.startswith('-Xm'): mem_opts.append(arg) else: pass_args.append(arg) # In the original shell script the test coded below read: # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ] # To reproduce the behaviour of the above shell code fragment # it is important to explictly check for equality with None # in the second condition, so a null envar value counts as True! if mem_opts == [] and getenv('_JAVA_OPTIONS') == None: mem_opts = default_jvm_mem_opts return (mem_opts, prop_opts, pass_args) def main(): java = java_executable() jar_dir = real_dirname(sys.argv[0]) (mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:]) if pass_args != [] and pass_args[0].startswith('eu'): jar_arg = '-cp' else: jar_arg = '-jar' jar_path = os.path.join(jar_dir, jar_file) java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args if '--jar_dir' in sys.argv[1:]: print(jar_path) else: sys.exit(subprocess.call(java_args)) if __name__ == '__main__': main()
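# Illustrative example (not part of the original wrapper) of how jvm_opts()
# above splits an argument list; the option and file names are made up:
#
#   jvm_opts(['-Xmx4g', '-Dsome.prop=1', '--input', 'reads.fq'])
#   # -> (['-Xmx4g'], ['-Dsome.prop=1'], ['--input', 'reads.fq'])
#
# Because an -Xm* option is present, default_jvm_mem_opts is not applied.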
#!/usr/bin/env python import os import sys import argparse import pat3dem.star as p3s def main(): progname = os.path.basename(sys.argv[0]) usage = progname + """ [options] <a star file> Write two star files after screening by an item and a cutoff in the star file. Write one star file after screening by a file containing blacklist/whitelist (either keyword or item). """ args_def = {'screen':'0', 'cutoff':'00', 'sfile':'0', 'white':0} parser = argparse.ArgumentParser() parser.add_argument("star", nargs='*', help="specify a star file to be screened") parser.add_argument("-s", "--screen", type=str, help="specify the item, by which the star file will be screened, by default {} (no screening). e.g., 'OriginX'".format(args_def['screen'])) parser.add_argument("-c", "--cutoff", type=str, help="specify the cutoff, by default '{}' (-s and -sf will be combined)".format(args_def['cutoff'])) parser.add_argument("-sf", "--sfile", type=str, help="specify a file containing a keyword each line, by default '{}' (no screening). e.g., 'f.txt'".format(args_def['sfile'])) parser.add_argument("-w", "--white", type=int, help="specify as 1 if you provide a whitelist in -sf".format(args_def['white'])) args = parser.parse_args() if len(sys.argv) == 1: print "usage: " + usage print "Please run '" + progname + " -h' for detailed options." sys.exit(1) # get default values for i in args_def: if args.__dict__[i] == None: args.__dict__[i] = args_def[i] # preprocess -sf if args.sfile != '0': lines_sf = open(args.sfile).readlines() lines_sfile = [] for line in lines_sf: line = line.strip() if line != '': lines_sfile += [line] # get the star file star = args.star[0] basename = os.path.basename(os.path.splitext(star)[0]) star_dict = p3s.star_parse(star, 'data_') header = star_dict['data_'] + star_dict['loop_'] header_len = len(header) with open(star) as read_star: lines = read_star.readlines()[header_len:-1] if args.screen != '0': # get the sc number scn = star_dict['_rln'+args.screen] if args.cu
toff != '00': # Name the output files screened1 = '{}_screened_{}-gt-{}.star'.format(basename, args.screen, args.cutoff) screened2 = '{}_screened_{}-le-{}.star'.format(basename, args.screen, args.cutoff) write_screen1 = open(screened1, 'w') write_screen1.write(''.join(header)) write_screen2 = open(screened2, 'w') write_screen2.write(''.join(header)) for line in lines: if float(line.split()[scn]) > float(args.cutoff):
write_screen1.write(line) else: write_screen2.write(line) write_screen1.write(' \n') write_screen1.close() write_screen2.write(' \n') write_screen2.close() print 'The screened star files have been written in {} and {}!'.format(screened1, screened2) elif args.sfile != '0': with open('{}_screened.star'.format(basename), 'w') as write_screen: write_screen.write(''.join(header)) if args.white == 0: for line in lines: key = line.split()[scn] if key not in lines_sfile: print 'Include {}.'.format(key) write_screen.write(line) else: for line in lines: key = line.split()[scn] if key in lines_sfile: print 'Include {}.'.format(key) write_screen.write(line) write_screen.write(' \n') elif args.sfile != '0': with open('{}_screened.star'.format(basename), 'w') as write_screen: write_screen.write(''.join(header)) if args.white == 0: for line in lines: skip = 0 for key in lines_sfile: if key in line: skip = 1 print 'Skip {}.'.format(key) break if skip == 0: write_screen.write(line) else: for line in lines: for key in lines_sfile: if key in line: print 'Include {}.'.format(key) write_screen.write(line) break write_screen.write(' \n') if __name__ == '__main__': main()
#*** #********************************************************************* #************************************************************************* #*** #*** GizmoDaemon Config Script #*** LIRCMceUSB2 MythTV config #*** #***************************************** #***************************************** #*** """ Copyright (c) 2007, Gizmo Daemon Team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ ############################ # Imports ########################## from GizmoDaemon import * from GizmoScriptActiveApplication import * from GizmoScriptAltTabber import * import subprocess ENABLED = True VERSION_NEEDED = 3.2 INTERESTED_CLASSES = [GizmoEventClass.LIRC] INTERESTED_WINDOWS = ["mythfrontend"] USES_LIRC_REMOTES = ["mceusb", "mceusb2"] POWER_APPLICATION = "mythfrontend" ############################ # LIRCMceUSB2MythTV Class definition ########################## class LIRCMceUSB2MythTV(GizmoScriptActiveApplication): """ MythTV LIRC Event Mapping for the MceUSB2 remote """ ############################ # Public Functions ########################## def onDeviceEvent(self, Event, Gizmo = None): """ Called from Base Class' onEvent method. See GizmodDispatcher.onEvent documention for an explanation of this function """ # if the event isn't from the remote we're interested in don't handle it if Event.Remote not in USES_LIRC_REMOTES: return False # process the key if Event.Button == "Power": # if mythfrontend is open, kill it subprocess.Popen(["killall", "mythfrontend"]) return True elif Event.Button == "TV": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_A) return True elif Event.Button == "Music": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_B) return True elif Event.Button == "Pictures": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH) return True elif Event.Button == "Videos": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH, [GizmoKey.KEY_RIGHTSHIFT]) return True elif Event.Button == "Stop": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S) return True elif Event.Button == "Record": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_R) return True elif Event.Button == "Pause": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P) return True elif Event.Button == "Rewind": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_COMMA, [GizmoKey.KEY_RIGHTSHIFT]) return True elif Event.Button == "Play": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P) return True elif Event.Button == "Forward": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOT, [GizmoKey.KEY_RIGHTSHIFT]) return True elif Event.Button == "Replay": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEUP) return True elif Event.Button == "Back": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ESC) return True elif Event.Button == "Up": return False elif Event.Button == "Skip": 
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEDOWN) return True elif Event.Button == "More": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_M) return True elif Event.Button == "Left": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFT) return True elif Event.Button == "OK": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ENTER) return True elif Event.Button == "Right": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHT) return True elif Event.Button == "Down": return False elif Event.Button == "VolUp": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHTBRACE) return True elif Event.Button == "VolDown": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFTBRACE) return True elif Event.Button == "Home": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_END) return True elif Event.Button == "ChanUp": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_UP) return True elif Event.Button == "ChanDown": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOWN) return True elif Event.Button == "RecTV": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_HOME) return True elif Event.Button == "Mute": return False elif Event.Button == "DVD": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_H) return True elif Event.Button == "Guide": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S) return True elif Event.Button == "LiveTV": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_N) return True elif Event.Button == "One": return False elif Event.Button == "Two": return False elif Event.Button == "Three": return False elif Event.Button == "Four": return False elif Event.Button == "Five": return False elif Event.Button == "Six": return False elif Event.Button == "Seven": return False elif Event.Button == "Eight": return False elif Event.Button == "Nine": retu
rn False elif Event.Button == "Star": return False elif Event.Button == "Zero": return False elif Event.Button == "Hash": return False elif Event.Button
== "Clear": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_C) return True elif Event.Button == "Enter": Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_I) return True else: # unmatched event, keep processing return False def onEvent(self, Event, Gizmo = None): """ Overloading Base Class' onEvent method! Make sure to call it! """ # check for power button # if pressed and mythfrontend isn't running, then launch it # also return False so that other scripts may make use of the power # button as well if Event.Class in self.InterestedClasses \ and Event.Remote in USES_LIRC_REMOTES \ and Event.Button == "Power" \ and Gizmod.isProcessRunning(POWER_APPLICATION) < 0: subprocess.Popen([POWER_APPLICATION]) Gizmod.updateProcessTree() # force an instantaneous process tree update return False # call base classe' onEvent method return GizmoScriptActiveApplication.onEvent(self, Event, Gizmo) ############################ # Private Functions ########################## def __init__(self): """ Default Constructor """ GizmoScriptActiveApplication.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES, INTERESTED_WINDOWS) ############################ # LIRCMceUSB2MythTV class end ########################## # register the user script LIRCMceUSB2MythTV()
""" Test DarwinLog "source include debug-level" functionality provided by the StructuredDataDarwinLog plugin. These tests are currently only supported when running against Darwin targets. """ import lldb import platform import re from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbtest_config class DarwinNSLogOutputTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) @skipUnlessDarwin @skipIfRemote # this test is currently written using lldb commands & assumes running on local system def setUp(self): # Call super's setUp(). TestBase.setUp(self) self.child = None self.child_prompt = '(lldb) ' self.strict_sources = False # Source filename. self.source = 'main.m' # Output f
ilename. self.exe_name = self.getBuildArtifact("a.out") self.d = {'OBJC_SOURCES': self.source, 'EXE': self.exe_name}
# Locate breakpoint. self.line = line_number(self.source, '// break here') def tearDown(self): # Shut down the process if it's still running. if self.child: self.runCmd('process kill') self.expect_prompt() self.runCmd('quit') # Let parent clean up super(DarwinNSLogOutputTestCase, self).tearDown() def run_lldb_to_breakpoint(self, exe, source_file, line, settings_commands=None): # Set self.child_prompt, which is "(lldb) ". prompt = self.child_prompt # So that the child gets torn down after the test. import pexpect import sys if sys.version_info.major == 3: self.child = pexpect.spawnu('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe)) else: self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec, self.lldbOption, exe)) child = self.child # Turn on logging for what the child sends back. if self.TraceOn(): child.logfile_read = sys.stdout # Disable showing of source lines at our breakpoint. # This is necessary for the logging tests, because the very # text we want to match for output from the running inferior # will show up in the source as well. We don't want the source # output to erroneously make a match with our expected output. self.runCmd("settings set stop-line-count-before 0") self.expect_prompt() self.runCmd("settings set stop-line-count-after 0") self.expect_prompt() # Run any test-specific settings commands now. if settings_commands is not None: for setting_command in settings_commands: self.runCmd(setting_command) self.expect_prompt() # Set the breakpoint, and run to it. child.sendline('breakpoint set -f %s -l %d' % (source_file, line)) child.expect_exact(prompt) child.sendline('run') child.expect_exact(prompt) # Ensure we stopped at a breakpoint. self.runCmd("thread list") self.expect(re.compile(r"stop reason = .*breakpoint")) def runCmd(self, cmd): if self.child: self.child.sendline(cmd) def expect_prompt(self, exactly=True): self.expect(self.child_prompt, exactly=exactly) def expect(self, pattern, exactly=False, *args, **kwargs): if exactly: return self.child.expect_exact(pattern, *args, **kwargs) return self.child.expect(pattern, *args, **kwargs) def do_test(self, expect_regexes=None, settings_commands=None): """ Run a test. """ self.build(dictionary=self.d) self.setTearDownCleanup(dictionary=self.d) exe = self.getBuildArtifact(self.exe_name) self.run_lldb_to_breakpoint(exe, self.source, self.line, settings_commands=settings_commands) self.expect_prompt() # Now go. self.runCmd("process continue") self.expect(expect_regexes) def test_nslog_output_is_displayed(self): """Test that NSLog() output shows up in the command-line debugger.""" self.do_test(expect_regexes=[ re.compile(r"(This is a message from NSLog)"), re.compile(r"Process \d+ exited with status") ]) self.assertIsNotNone(self.child.match) self.assertGreater(len(self.child.match.groups()), 0) self.assertEqual( "This is a message from NSLog", self.child.match.group(1)) def test_nslog_output_is_suppressed_with_env_var(self): """Test that NSLog() output does not show up with the ignore env var.""" # This test will only work properly on macOS 10.12+. Skip it on earlier versions. # This will require some tweaking on iOS. 
match = re.match(r"^\d+\.(\d+)", platform.mac_ver()[0]) if match is None or int(match.group(1)) < 12: self.skipTest("requires macOS 10.12 or higher") self.do_test( expect_regexes=[ re.compile(r"(This is a message from NSLog)"), re.compile(r"Process \d+ exited with status") ], settings_commands=[ "settings set target.env-vars " "\"IDE_DISABLED_OS_ACTIVITY_DT_MODE=1\"" ]) self.assertIsNotNone(self.child.match) self.assertEqual(len(self.child.match.groups()), 0)
# Copyright (c) 2015 The Phtevencoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Dummy Socks5 server for testing. ''' from __future__ import print_function, division, unicode_literals import socket, threading, Queue import traceback, sys ### Protocol constants class Command: CONNECT = 0x01 class AddressType: IPV4 = 0x01 DOMAINNAME = 0x03 IPV6 = 0x04 ### Utility functions def recvall(s, n): '''Receive n bytes from a socket, or fail''' rv = bytearray() while n > 0: d = s.recv(n) if not d: raise IOError('Unexpected end of stream') rv.extend(d) n -= len(d) return rv ### Implementation classes class Socks5Configuration(object): '''Proxy configuration''' def __init__(self): self.addr = None # Bind address (must be set) self.af = socket.AF_INET # Bind address family self.unauth = False # Support unauthenticated self.auth = False # Support authentication class Socks5Command(object): '''Information about an incoming socks5 command''' def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) self.atyp = atyp # Address type (one of AddressType.*) self.addr = addr # Address self.port = port # Port to connect to self.username = username self.password = password def __repr__(self): return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password) class Socks5Connection(object): def __init__(self, serv, conn, peer): self.serv = serv self.conn = conn self.peer = peer def handle(self): ''' Handle socks5 request according to RFC1928 ''' try: # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: raise IOError('Invalid socks version %i' % ver) # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) method = None if 0x02 in methods and self.serv.conf.auth: method = 0x02 # username/password elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: raise IOError('No supported authentication method was offered') # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) username = None password = None if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: raise IOError('Invalid auth packet version %i' % ver) ulen = recvall(self.conn, 1)[0] username = str(
recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0] password = str(recvall(self.conn, plen)) # Send authentication response self.conn.sendall(bytearray([0x01, 0x00])) # Read connect request (ver,cmd,rsv,atyp) = recvall(self.conn, 4) if ver != 0x05: raise IOError('Invalid socks version %i in connect request' % ver) if cmd != Command.CONNECT: raise IOError('Unhandled command %i in connect request' % cmd) if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) elif atyp == AddressType.DOMAINNAME: n = recvall(self.conn, 1)[0] addr = str(recvall(self.conn, n)) elif atyp == AddressType.IPV6: addr = recvall(self.conn, 16) else: raise IOError('Unknown address type %i' % atyp) port_hi,port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) print('Proxy: ', cmdin) # Fall through to disconnect except Exception,e: traceback.print_exc(file=sys.stderr) self.serv.queue.put(e) finally: self.conn.close() class Socks5Server(object): def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.s.bind(conf.addr) self.s.listen(5) self.running = False self.thread = None self.queue = Queue.Queue() # report connections and exceptions to client def run(self): while self.running: (sockconn, peer) = self.s.accept() if self.running: conn = Socks5Connection(self, sockconn, peer) thread = threading.Thread(None, conn.handle) thread.daemon = True thread.start() def start(self): assert(not self.running) self.running = True self.thread = threading.Thread(None, self.run) self.thread.daemon = True self.thread.start() def stop(self): self.running = False # connect to self to end run loop s = socket.socket(self.conf.af) s.connect(self.conf.addr) s.close() self.thread.join()
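# Minimal usage sketch (not part of the original module), assuming something
# connects to 127.0.0.1:1080 as a SOCKS5 client while the server is running.
def _socks5_server_example():
    conf = Socks5Configuration()
    conf.addr = ('127.0.0.1', 1080)
    conf.unauth = True           # accept the "no authentication" method
    conf.auth = True             # also accept username/password

    server = Socks5Server(conf)
    server.start()
    # ... point the code under test at the proxy address conf.addr ...
    cmd = server.queue.get()     # blocks until one connection has been handled
    print('first proxied request: %r' % (cmd,))
    server.stop()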
#!/usr/bin/env python # encoding: utf-8 # Copyright 2010 California Institute o
f Technology. ALL RIGHTS # RESERVED. U.S. Government Sponsorship acknowledged. from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler class DropHandler(
BaseHTTPRequestHandler): def dropRequest(self): self.send_response(200) self.send_header('Content-length', '0') self.send_header('Connection', 'close') self.end_headers() do_GET = do_POST = do_HEAD = do_PURGE = do_OPTIONS = do_PUT = do_DELETE = do_TRACE = do_CONNECT = dropRequest def main(): server = HTTPServer(('', 8989), DropHandler) server.serve_forever() if __name__ == '__main__': main()
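# Quick manual check (not part of the original module): every request method
# should get back an empty 200 response. Python 2 urllib2 is used to match the
# BaseHTTPServer import above; the server must already be running on port 8989.
def _drop_handler_check():
    import urllib2
    response = urllib2.urlopen('http://localhost:8989/any/path')
    assert response.getcode() == 200
    assert response.read() == ''   # Content-length is 0, so the body is empty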
import sys class DualModulusPrescaler: def __init__(self,p): self.m_p = p return def set_prescaler(self): return # may be internal def set_a(self,a): self.m_a = a return # may be internal def set_n(self,n): self.m_n = n return def set_ref_divider(self, r): self.m_r = r return def ge
t_ref_divider(self): return self.m_r def get_division_ratio(self): v = (self.m_p * self.m_n) + self.m_a return v class Osc: def __init__(self, ref_freq, prescaler): self.m_ref = ref_freq self.m_prescaler = prescaler return def get_fre
q(self): # print self.m_prescaler.get_division_ratio() return (self.m_ref/self.m_prescaler.get_ref_divider()) * self.m_prescaler.get_division_ratio() def calc_a(self): return def calc_n(self): return def get_counter_params(self,freq): x = freq * self.m_prescaler.get_ref_divider()/self.m_ref n = int(x/32) a = int(round(x-n*32)) encoded = (n<<7) + a return (n, a, encoded) def set_freq(self,freq): return class Danphone: def __init__(self,datalink): dmps = DualModulusPrescaler(32) #dmps.set_ref_divider(2048) dmps.set_ref_divider(1088) osc = Osc(12.8e6,dmps) print osc.get_counter_params(70.3529E6) sys.exit(0) for i in range(128): dmps.set_a(i) dmps.set_n(456) osc = Osc(12.8e6,dmps) print osc.get_freq()/1000000 return if __name__=="__main__": d = Danphone(None)
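# Worked example (not part of the original module) of the counter arithmetic in
# get_counter_params(), using the same values as the Danphone constructor above
# (P = 32, reference divider R = 1088, 12.8 MHz reference):
#
#   x = 70.3529 MHz * 1088 / 12.8 MHz  ~= 5980
#   n = int(5980 / 32) = 186,   a = round(5980 - 186*32) = 28
#   encoded = (186 << 7) + 28 = 23836
#   realised frequency = (12.8e6 / 1088) * (32*186 + 28) ~= 70.35 MHz
#
# so the call below is expected to print (186, 28, 23836):
def _prescaler_example():
    dmps = DualModulusPrescaler(32)
    dmps.set_ref_divider(1088)
    osc = Osc(12.8e6, dmps)
    print osc.get_counter_params(70.3529e6)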
if self.source != otherpo.source or self.getcontext() != otherpo.getcontext(): self.markfuzzy() else: self.markfuzzy(otherpo.isfuzzy()) elif not otherpo.istranslated(): if self.source != otherpo.source: self.markfuzzy() else: if self.target != otherpo.target: self.markfuzzy() def isheader(self): #TODO: fix up nicely return not self.getid() and len(self.target) > 0 def isblank(self): if self.isheader() or self.msgidcomment: return False if (self._msgidlen() == 0) and
(self._msgstrlen() == 0) and len(self._msgctxt) == 0: return True return False def hastypecomment(sel
f, typecomment): """Check whether the given type comment is present""" # check for word boundaries properly by using a regular expression... return sum(map(lambda tcline: len(re.findall("\\b%s\\b" % typecomment, tcline)), self.typecomments)) != 0 def hasmarkedcomment(self, commentmarker): """Check whether the given comment marker is present as # (commentmarker) ...""" commentmarker = "(%s)" % commentmarker for comment in self.othercomments: if comment.startswith(commentmarker): return True return False def settypecomment(self, typecomment, present=True): """Alters whether a given typecomment is present""" if self.hastypecomment(typecomment) != present: if present: self.typecomments.append("#, %s\n" % typecomment) else: # this should handle word boundaries properly ... typecomments = map(lambda tcline: re.sub("\\b%s\\b[ \t,]*" % typecomment, "", tcline), self.typecomments) self.typecomments = filter(lambda tcline: tcline.strip() != "#,", typecomments) def istranslated(self): return super(pounit, self).istranslated() and not self.isobsolete() def istranslatable(self): return not (self.isheader() or self.isblank() or self.isobsolete()) def isfuzzy(self): return self.hastypecomment("fuzzy") def _domarkfuzzy(self, present=True): self.settypecomment("fuzzy", present) def makeobsolete(self): """Makes this unit obsolete""" self.sourcecomments = [] self.automaticcomments = [] super(pounit, self).makeobsolete() def hasplural(self): """returns whether this pounit contains plural strings...""" source = self.source return isinstance(source, multistring) and len(source.strings) > 1 def __str__(self): """convert to a string. double check that unicode is handled somehow here""" _cpo_unit = cpo.pounit.buildfromunit(self) return str(_cpo_unit) def getlocations(self): """Get a list of locations from sourcecomments in the PO unit. rtype: List return: A list of the locations with '#: ' stripped """ #TODO: rename to .locations return self.sourcecomments def addlocation(self, location): """Add a location to sourcecomments in the PO unit. :param location: Text location e.g. 'file.c:23' does not include #: :type location: String """ self.sourcecomments.append(location) def _extract_msgidcomments(self, text=None): """Extract KDE style msgid comments from the unit. :rtype: String :return: Returns the extracted msgidcomments found in this unit's msgid. """ if text: return pocommon.extract_msgid_comment(text) else: return self.msgidcomment def getcontext(self): """Get the message context.""" return self._msgctxt + self.msgidcomment def setcontext(self, context): context = data.forceunicode(context or u"") self._msgctxt = context def getid(self): """Returns a unique identifier for this unit.""" context = self.getcontext() # Gettext does not consider the plural to determine duplicates, only # the msgid. For generation of .mo files, we might want to use this # code to generate the entry for the hash table, but for now, it is # commented out for conformance to gettext. 
# id = '\0'.join(self.source.strings) id = self.source if self.msgidcomment: id = u"_: %s\n%s" % (context, id) elif context: id = u"%s\04%s" % (context, id) return id @classmethod def buildfromunit(cls, unit): """Build a native unit from a foreign unit, preserving as much information as possible.""" if type(unit) == cls and hasattr(unit, "copy") and callable(unit.copy): return unit.copy() elif isinstance(unit, pocommon.pounit): newunit = cls(unit.source) newunit.target = unit.target #context newunit.msgidcomment = unit._extract_msgidcomments() if not newunit.msgidcomment: newunit.setcontext(unit.getcontext()) locations = unit.getlocations() if locations: newunit.addlocations(locations) notes = unit.getnotes("developer") if notes: newunit.addnote(notes, "developer") notes = unit.getnotes("translator") if notes: newunit.addnote(notes, "translator") newunit.markfuzzy(unit.isfuzzy()) if unit.isobsolete(): newunit.makeobsolete() for tc in ['python-format', 'c-format', 'php-format']: if unit.hastypecomment(tc): newunit.settypecomment(tc) break return newunit else: return base.TranslationUnit.buildfromunit(unit) class pofile(pocommon.pofile): """A .po file containing various units""" UnitClass = pounit def _build_self_from_cpo(self): """Builds up this store from the internal cpo store. A user must ensure that self._cpo_store already exists, and that it is deleted afterwards.""" for unit in self._cpo_store.units: self.addunit(self.UnitClass.buildfromunit(unit)) self._encoding = self._cpo_store._encoding def _build_cpo_from_self(self): """Builds the internal cpo store from the data in self. A user must ensure that self._cpo_store does not exist, and should delete it after using it.""" self._cpo_store = cpo.pofile(noheader=True) for unit in self.units: if not unit.isblank(): self._cpo_store.addunit(cpo.pofile.UnitClass.buildfromunit(unit, self._encoding)) if not self._cpo_store.header(): #only add a temporary header self._cpo_store.makeheader(charset=self._encoding, encoding="8bit") def parse(self, input, duplicatestyle="merge"): """Parses the given file or file source string.""" try: if hasattr(input, 'name'): self.filename = input.name elif not getattr(self, 'filename', ''): self.filename = '' self.units = [] self._cpo_store = cpo.pofile(input, noheader=True) self._build_self_from_cpo() del self._cpo_store except Exception as e: raise base.ParseError(e) # duplicates are now removed by default unless duplicatestyle=allow if duplicatestyle != "allow": self.removeduplicates(duplicatestyle=duplicatestyle) def removeduplicates(self, duplicatestyle="merge"): """Make sure each msgid is unique. The value of duplicatestyle tells which action is performed to deal with duplicate entries. Valid values are: - merge -- Duplicate entries are merged together, - allow -- Duplicate entries are kept as is, - msgctxt -- A msgctxt is added to ensure duplicate entries are different.
import pytest import eagerpy as ep import foolbox as fbn def test_plot(dummy: ep.Tensor) -> None: # j
ust tests that the calls don't throw any errors images = ep.zeros(dummy, (10, 3, 32, 32)) fbn.plot.images(images) fbn.plot.images(images, n=3) fbn.plot.images(images, n=3, data_format="channels_first") fbn.plot.images(images, nrows=4) fbn.plot.images(images, ncols=3) fbn.plot.images(images, nrows=2, ncols=6) fbn.plot.images(images, nrows=2, ncols=4) # test for single channel images images = ep.zeros(dummy, (10, 32
, 32, 1)) fbn.plot.images(images) with pytest.raises(ValueError): images = ep.zeros(dummy, (10, 3, 3, 3)) fbn.plot.images(images) with pytest.raises(ValueError): images = ep.zeros(dummy, (10, 1, 1, 1)) fbn.plot.images(images) with pytest.raises(ValueError): images = ep.zeros(dummy, (10, 32, 32)) fbn.plot.images(images) with pytest.raises(ValueError): images = ep.zeros(dummy, (10, 3, 32, 32)) fbn.plot.images(images, data_format="foo")
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase import vtk cla
ss vtkFieldDataToAttributeDataFilter(SimpleVTKClassModuleBase): def __init__(self, module_manager): SimpleVTKClassModuleBase.__init__( self, module_manager, vtk.vtkFieldDataToAttributeDataFilter(), 'Processing.', ('vtkDataSet',), ('vtkDataSet',), replaceDoc=True, inputFunctions=None, outputFunction
s=None)
import codecs from ConfigParser import ConfigParser import os import subprocess import sys import six import twiggy from twiggy import log from twiggy.levels import name2level from xdg import BaseDirectory def asbool(some_value): """ Cast config values to boolean. """ return six.text_type(some_value).lower() in [ 'y', 'yes', 't', 'true', '1', 'on' ] def get_service_password(service, username, oracle=None, interactive=False): """ Retrieve the sensitive password for a service by: * retrieving password from a secure store (@oracle:use_keyring, default) * asking the password from the user (@oracle:ask_password, interactive) * executing a command and use the output as password (@oracle:eval:<command>) Note that the keyring may or may not be locked which requires that the user provides a password (interactive mode). :param service: Service name, may be key into secure store (as string). :param username: Username for the service (as string). :param oracle: Hint which password oracle strategy to use. :return: Retrieved password (as string) .. seealso:: https://bitbucket.org/kang/python-keyring-lib """ import getpass import keyring password = None if not oracle or oracle == "@oracle:use_keyring": password = keyring.get_password(service, username) if interactive and password is None: # -- LEARNING MODE: Password is not stored in keyring yet. oracle = "@oracle:ask_password" password = get_service_password(service, username, oracle, interactive=True) if password: keyring.set_password(service, username, password) elif interactive and oracle == "@oracle:ask_password": prompt = "%s password: " % service password = getpass.getpass(prompt) elif oracle.startswith('@oracle:eval:'): command = o
racle[13:] p = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, #stderr=subprocess.STDOUT ) password = p.stdout.read()[:-1] if password is None: die("MISSING PASSWORD: oracle='%s', interactive=%s for service=%s" % (oracle, interactive, service)) return password def load_example_rc(): fname = os.path.join(
os.path.dirname(__file__), 'docs/configuration.rst' ) with open(fname, 'r') as f: readme = f.read() example = readme.split('.. example')[1][4:] return example error_template = """ ************************************************* * There was a problem with your bugwarriorrc * * {msg} * Here's an example template to help: * ************************************************* {example}""" def die(msg): log.options(suppress_newlines=False).critical( error_template, msg=msg, example=load_example_rc(), ) sys.exit(1) def validate_config(config, main_section): if not config.has_section(main_section): die("No [%s] section found." % main_section) twiggy.quickSetup( name2level(config.get(main_section, 'log.level')), config.get(main_section, 'log.file') ) if not config.has_option(main_section, 'targets'): die("No targets= item in [%s] found." % main_section) targets = config.get(main_section, 'targets') targets = filter(lambda t: len(t), [t.strip() for t in targets.split(",")]) if not targets: die("Empty targets= item in [%s]." % main_section) for target in targets: if target not in config.sections(): die("No [%s] section found." % target) # Validate each target one by one. for target in targets: service = config.get(target, 'service') if not service: die("No 'service' in [%s]" % target) if service not in SERVICES: die("'%s' in [%s] is not a valid service." % (service, target)) # Call the service-specific validator SERVICES[service].validate_config(config, target) def load_config(main_section): config = ConfigParser({'log.level': "DEBUG", 'log.file': None}) path = None first_path = BaseDirectory.load_first_config('bugwarrior') if first_path is not None: path = os.path.join(first_path, 'bugwarriorrc') old_path = os.path.expanduser("~/.bugwarriorrc") if path is None or not os.path.exists(path): if os.path.exists(old_path): path = old_path else: path = os.path.join(BaseDirectory.save_config_path('bugwarrior'), 'bugwarriorrc') config.readfp( codecs.open( path, "r", "utf-8", ) ) config.interactive = False # TODO: make this a command-line option validate_config(config, main_section) return config def get_taskrc_path(conf, main_section): path = '~/.taskrc' if conf.has_option(main_section, 'taskrc'): path = conf.get(main_section, 'taskrc') return os.path.normpath( os.path.expanduser(path) ) # This needs to be imported here and not above to avoid a circular-import. from bugwarrior.services import SERVICES
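# Illustrative only (not part of the original module): the three password
# "oracle" spellings accepted by get_service_password(). The service name,
# username and the `pass show` command are hypothetical.
def _password_oracle_examples():
    # 1. default: look the password up in the system keyring
    pw = get_service_password('mytracker', 'alice')
    # 2. prompt the user (only sensible in interactive mode)
    pw = get_service_password('mytracker', 'alice',
                              oracle='@oracle:ask_password', interactive=True)
    # 3. run a command and use its output as the password
    pw = get_service_password('mytracker', 'alice',
                              oracle='@oracle:eval:pass show mytracker')
    return pw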
27,2015 @author: Yongxiang Qiu, Kay Kasemir ''' try: import xml.etree.cElementTree as ET except: import xml.etree.ElementTree as ET from datetime import datetime def getTimeSeries(data, name, convert='plain'): '''Get values aligned by different types of time. :param name: channel name :param convert: plain -> timestamp as seconds since epoch datetime -> datetime objects :return: value list with time Example: >>> data.getTimeSeries(..) ''' if convert == 'plain': return [ [t for t in data[name]['time'] ], [v for v in data[name]['value']] ] elif convert == 'datetime': return [ [str(getDatetime(time)) for time in data[name]['time']], [ v for v in data[name]['value']] ] def getDatetime(time): '''Convert log time :param time: Posix millisecond timestamp of logged sample :return: datetime ''' secs = time / 1000.0 return datetime.fromtimestamp(secs) def alignSerial(data, channel): ''' Iterate data by serial ID. :param: channel: Name of channel(device) needed to be iterate. :return: ( (id1, value1, time1) ,(id2, value2, time2), ..., (idn, valuen, timen)) ''' R = list(range(len(data[channel]['id']))) for i in iter(R): yield (data[channel]['id'][i], data[channel]['value'][i], data[channel]['time'][i]) ##TODO: step def alignTime(data, channel, intv = 0): ''' Iterate data by time. :param: channel: Name of channel(device) needed to be iterate. :return: Iterator object. ''' R = list(range(len(data[channel]['time']))) for i in iter(R): yield (data[channel]['time'][i], data[channel]['value'][i]) def getTable(data, *devices, **kwargs): '''Create data table Aligns samples for given list of devices by sample ID. Assuming that serialID in data is Ascending. Ignoring the serialID 'gap'. :param devices: One or more devices :param kwargs: with_id=True to add sample serial id, with_time=True to add time (seconds since epoch) :return: Table. result[0],result[1], .. hold the sample ID (if with_id), the time (if with_time), then the values for first device, for second device and so on. ''' with_id = kwargs['with_id'] if 'with_id' in kwargs else False with_time = kwargs['with_time'] if 'with_time' in kwargs else False devsIters = [ alignSerial(data, dev) for dev in devices] # prepare devices iterators cur_samps = [next(devIt) for devIt in devsIters] # initial devices iterators result = [[] for dev in devices] if with_id: result.insert(0, []) if with_time: result.insert(0, []) cur_id = -1 # current sample id cur_time = 0 # Current sample time index = 0 while True: try : cur_id = min((samp[0] for samp in cur_samps if samp is not None)) # find smallest sample ID as current id cur_time = max((samp[2] for samp in cur_samps if samp is not None)) # find last time stamp except ValueError: # finished break data_col = 0 if with_id: result[data_col].append(cur_id) data_col += 1 if with_time: result[data_col].append(cur_time) data_col += 1 for i in range(len(devsIters)): # for each device ,there are 3 situations: if cur_samps[i] is None: # 1. if device has been exhausted. result[data_col+i].append(result[data_col+i][index-1]) # continue with previous value elif cur_samps[i][0] == cur_id: # 2. if serial_id is the current id ( means this device was logged at current serial_id) try: result[data_col+i].append(cur_samps[i][1]) # fetch value cur_samps[i] = next(devsIters[i]) # step iter of current device and its value except StopIteration: # if current device is just exhausted cur_samps[i] = None elif cur_samps[i][0] > cur_id: #3. 
if serial_id is in the future ( means this device was not logged at the current serial_id) if index == 0: # 1st loop result[data_col+i].append(None) else: result[data_col+i].append(result[data_col+i][index-1]) # fetch and save the previous value index += 1 return result class Data(object): ''' classdocs ''' def __init__(self, Xml): ''' Constructor ''' self.__logData = self.__parseRaw(Xml) def __parseRaw(self,Xml): ''' Raw Shape: for example : logData={ 'Xpos':{'id':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'time' : [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10], 'value': [0, 0, 1, 2, 3, 3, 3, 3, 3, 3], }, 'ypos':{'id':[4, 5, 6, 7, 8, 9], 'time' : [t1,t2,t3,t4,t5,t6], 'value': [0, 1, 1, 2, 3, 4], }, ... 'somePV':{'id':[0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], 'time' : [t1,t2,t3,t4,t5,t6, t7, t8, t9,t10,t11,t12,t13,t14,t15,t16] 'value': [v1,v2,v3,v4,v5,v6, v7, v8, v9,v10,v11,v12,v13,v14,v15,v16] } } ''' channels = ET.fromstring(Xml).iter('device') logdata = {} for channel in channels: samples = channel.findall('.//sample') logdata[channel.find('name').text] = { 'id' : [int(sample.attrib['id']) for sample in samples], 'time' : [int(sample.find('time').text) for sample in samples], 'value' : [self.__types((sample.find('value').text)) for sample in samples] } return logdata def __types(self, text): ''' Try to cast text to float or int. ''' try: if '.' in text: return float(text) else: return int(text) except ValueError: return text finally: return text def __getitem__(self, key): return self.__logData[key] def PVlist(self): ''' Get the list of all PV names. ''' return list(self.__logData.keys()) def PV(self, PVname): ''' Get all data of a PV. :param PVname: Name of the PV. :return: Dictionary of the data sets, like: {'id':[...], 'time':[...], 'value'[...]} ''' return self.__logData[PVname] def PVvalue(self, PVname): ''' Get all values of a PV, with :param PVname: Name of the PV. :return: List of the values of the PV, like: [0.1,0.2,...,19.2] ''' return self.__logData[PVname]['value'] def PVtime(self, PVname): ''' Get all timestamps of a PV. :param PVname: Name of the PV. :return: List of the timestamps of the PV, like: ['1427396679782', '1427396679782', ... , '1427396679782'] ''' return self.__logData[PVname]['time'] def __str__(self): ''' Give a readable printing of the logged data. ''' prettyOut = '' for key in self.__logData: prettyOu
t += key + ' : \n'
prettyOut += '{\n' prettyOut += " 'id' : " + str(self.__logData[key]['id']) + ' ,\n' prettyOut += " 'time' : " + str(self.__logData[key]['time']) + ' ,\n' prettyOut += " 'value' : " + str(self.__lo
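# Worked example (not part of the original module) of the sample-ID alignment
# performed by getTable() above. The channel names and values are invented;
# plain dicts are enough here because getTable() only indexes
# data[channel]['id' | 'time' | 'value'].
def _get_table_example():
    data = {
        'xpos':     {'id': [0, 1, 2], 'time': [10, 11, 12], 'value': [1.0, 2.0, 3.0]},
        'readback': {'id': [1, 2, 3], 'time': [11, 12, 13], 'value': [10, 20, 30]},
    }
    table = getTable(data, 'xpos', 'readback', with_id=True)
    assert table[0] == [0, 1, 2, 3]           # union of the sample ids
    assert table[1] == [1.0, 2.0, 3.0, 3.0]   # xpos: last value repeated once exhausted
    assert table[2] == [None, 10, 20, 30]     # readback: None before its first sample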
# -*- coding: utf-8 -*- u""" Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U. This file is part of Toolium. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITION
S OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from nose.tools import assert_equal from android.pageobjects.menu import MenuPageObject from android.pageobjects.tabs import TabsPageObject from android.test_cases import AndroidTestCase class Tabs(AndroidTestCase): def test_change_tab(self): # Open tabs activity MenuPageObject().open_option('Views').open_option('Tabs').open_option('1. Content By Id') tabs_page = TabsPageObject() # Check that the first tab is open assert_equal('tab1', tabs_page.content1.text) # Open second tab and check content tabs_page.tab2.click() assert_equal('tab2', tabs_page.content2.text)
# -*- coding: utf-8 -*- # Copyright (c) 2015, Michael Droettboom All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # The views and conclusions contained in the software and # documentation are those of the authors and should not be interpreted # as representing official policies, either expressed or implied, of # the FreeBSD Project. from __future__ import print_function, unicode_literals, absolute_import Bitmap__init__ = """ A structure used to describe a bitmap or pixmap to the raster. `Bitmap` supports the Python buffer interface, so it is easy to convert it to a Numpy array. For example:: >>> import numpy as np >>> a = np.asarray(bitmap) """ Bitmap_buffer = """ Get the bitmap's contents as a buffer. In most cases, the preferred method to get the data is to cast the `Bitmap` object to a memoryview, since that will also have size and type information. """ Bitmap_convert = """ Convert a `Bitmap` to 8 bits per pixel. Given a `Bitmap` with depth 1bpp, 2bpp, 4bpp, or 8bpp converts it to one with depth 8bpp, making the number of used bytes per line (a.k.a. the ‘pitch’) a multiple of `alignment`. Parameters ---------- alignment : int, optional The pitch of the bitma
p is a multiple of this parameter. Common values
are 1, 2, or 4. Returns ------- target : Bitmap The bitmap, converted to 8bpp. """ Bitmap_num_grays = """ The number of gray levels used in the bitmap. This field is only used with `PIXEL_MODE.GRAY`. """ Bitmap_pitch = """ The number of bytes taken by one bitmap row. Includes padding. The pitch is positive when the bitmap has a ‘down’ flow, and negative when it has an ‘up’ flow. In all cases, the pitch is an offset to add to a bitmap pointer in order to go down one row. Note that ‘padding’ means the alignment of a bitmap to a byte border, and FreeType functions normally align to the smallest possible integer value. For the B/W rasterizer, `pitch` is always an even number. To change the pitch of a bitmap (say, to make it a multiple of 4), use `Bitmap.convert`. Alternatively, you might use callback functions to directly render to the application's surface. """ Bitmap_pixel_mode = """ The `PIXEL_MODE`, i.e., how pixel bits are stored. """ Bitmap_rows = """ The number of bitmap rows. """ Bitmap_to_list = """ |freetypy| Convert the bitmap to a nested list. """ Bitmap_width = """ The number of pixels in bitmap row. """ PIXEL_MODE = """ Constants related to the pixel mode of bitmaps. - `MONO`: A monochrome bitmap, using 1 bit per pixel. Note that pixels are stored in most-significant order (MSB), which means that the left-most pixel in a byte has value 128. - `GRAY`: An 8-bit bitmap, generally used to represent anti-aliased glyph images. Each pixel is stored in one byte. Note that the number of ‘gray’ levels is stored in the ‘num_grays’ field of the Bitmap structure (it generally is 256). - `GRAY2`: A 2-bit per pixel bitmap, used to represent embedded anti-aliased bitmaps in font files according to the OpenType specification. We haven't found a single font using this format, however. - `GRAY4`: A 4-bit per pixel bitmap, representing embedded anti-aliased bitmaps in font files according to the OpenType specification. We haven't found a single font using this format, however. - `LCD`: An 8-bit bitmap, representing RGB or BGR decimated glyph images used for display on LCD displays; the bitmap is three times wider than the original glyph image. See also `RENDER_MODE.LCD`. On many freetype builds, this functionality will be disabled due to patent restrictions, in which case the resulting bitmap will be grayscale. - `LCD_V`: An 8-bit bitmap, representing RGB or BGR decimated glyph images used for display on rotated LCD displays; the bitmap is three times taller than the original glyph image. See also `RENDER_MODE.LCD_V`. On many freetype builds, this functionality will be disabled due to patent restrictions, in which case the resulting bitmap will be grayscale. """
azure.core.polling.LROPoller[~azure.mgmt.costmanagement.models.OperationStatus] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._by_billing_account_id_initial( billing_account_id=billing_account_id, start_date=start_date, end_date=end_date, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('OperationStatus', pipeline_re
sponse) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'), } if polling is True:
polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_by_billing_account_id.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'} # type: ignore def _by_billing_profile_id_initial( self, billing_account_id, # type: str billing_profile_id, # type: str start_date, # type: str end_date, # type: str **kwargs # type: Any ): # type: (...) -> Optional["_models.OperationStatus"] cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatus"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-11-01" accept = "application/json" # Construct URL url = self._by_billing_profile_id_initial.metadata['url'] # type: ignore path_format_arguments = { 'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'), 'billingProfileId': self._serialize.url("billing_profile_id", billing_profile_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['startDate'] = self._serialize.query("start_date", start_date, 'str') query_parameters['endDate'] = self._serialize.query("end_date", end_date, 'str') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) response_headers = {} deserialized = None if response.status_code == 200: deserialized = self._deserialize('OperationStatus', pipeline_response) if response.status_code == 202: response_headers['Location']=self._deserialize('str', response.headers.get('Location')) response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After')) if cls: return cls(pipeline_response, deserialized, response_headers) return deserialized _by_billing_profile_id_initial.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'} # type: ignore def begin_by_billing_profile_id( self, billing_account_id, # type: str billing_profile_id, # type: str start_date, # type: str end_date, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller["_models.OperationStatus"] """Generates the reservations details report for provided date range asynchronously by billing profile. 
:param billing_account_id: BillingAccount ID. :type billing_account_id: str :param billing_profile_id: BillingProfile ID. :type billing_profile_id: str :param start_date: Start Date. :type start_date: str :param end_date: End Date. :type end_date: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either OperationStatus or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.costmanagement.models.OperationStatus] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._by_billing_profile_id_initial( billing_account_id=billing_account_id, billing_profile_id=billing_profile_id, start_date=start_date, end_date=end_date, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('OperationStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'), 'billingProfileId': self._serialize.url("billing_profile_id", billing_profile_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path
#!/usr/bin/env python # # Build QT5 webengine # import os import sys import xsysroot if __name__ == '__main__': # You want to be careful to enable a debug build. libQtWebengine.so takes 817MB :-) debug_build=False build_mode='release' if not debug_build else 'debug' # We need a xsysroot profile with QT5 built in it if len(sys.argv) > 1: xprofile=sys.argv[1] else: print 'Need a xsysroot profile' sys.exit(1) # Find and activate the xsysroot profile print '>>> Opening xsysroot profile: {}'.format(xprofile) try: picute=xsysroot.XSysroot(profile=xprofile) except: print 'You need to create a Xsysroot Picute profile' print 'Please see the README file' sys.exit(1) # Locate Webengine source code directory webengine_path=os.path.join(picute.query('tmp'), 'qt5/qtwebengine') # Apply temporary patch to build QT5.7 Webengine for the RPI # https://bugreports.qt.io/browse/QTBUG-57037 if not os.path.isdir(webengine_path): print '>>> Could not find Webengine path: {}'.format(webengine_path) sys.exit(1) else: patch_file='gyp_run.pro' print '>>> Overwriting webengine qmake file: {}'.format(patch_file) rc=os.system('cp {} {}/src/core'.format(patch_file, webengine_path)) if rc: print 'Could not apply patch' sys.exit(1) else: print '>>> Webengine patch has been applied' # Now mount image if needed print '>>> Accessing image...' if not picute.is_mounted(): if not picute.mount(): sys.exit(1) # Step 1: QMAKE print '>>> Running Qmake...' cmdline_prefix='export PKG_CONFIG_PATH={}/usr/li
b/arm-linux-gnueabihf/pkgconfig'.format(picute.query('sysroot')) print '>>> cmdline_prefix: ', cmdline_prefix qmake_
command='{}/usr/local/qt5/bin/qmake ' \ 'WEBENGINE_CONFIG+=use_proprietary_codecs CONFIG+={}'.format(picute.query('sysroot'), build_mode) print '>>> Qmake command:', qmake_command rc=os.system('{} ; cd {} ; {}'.format(cmdline_prefix, webengine_path, qmake_command)) if rc: print '>>> Qmake failed rc={} :-('.format(rc) sys.exit(1) # Step 2: MAKE print '>>> Running Make...' rc=os.system('{} ; cd {} ; make'.format(cmdline_prefix, webengine_path)) if rc: print '>>> Make failed rc={} :-('.format(rc) sys.exit(1) # Step 3: INSTALL print '>>> Running Make Install...' rc=os.system('cd {} ; sudo make install'.format(webengine_path)) if rc: print '>>> Make install failed rc={} :-('.format(rc) sys.exit(1) print '>>> Webengine built and installed' # Webengine build complete: Unmount image if not picute.umount(): print '>>> WARNING: Image is busy, most likely installation left some running processes.' sys.exit(1) sys.exit(0)
#-*- coding: utf-8 -*- import struct import pytest from proteusisc.controllerManager import getDriverInstanceForDevice from proteusisc.jtagScanChain import JTAGScanChain from proteusisc.test_utils import FakeUSBDev, FakeDevHandle,\ MockPhysicalJTAGDevice, FakeXPCU1Handle from proteusisc.bittypes import bitarray, NoCareBitarray def test_black_hole_register_constraints_three_black_holes(): #Tests that the compiler can work around black hole registers #to get data where it needs to go. The expected behavior is #to create three different frames, one per prim, but the frame #state is not being tested here... just the results in the regs. dev0 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D0", status=bitarray('111100')) dev1 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D1", status=bitarray('111101')) dev2 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D2", status=bitarray('111110')) usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2)) chain = JTAGScanChain(getDriverInstanceForDevice(usbdev)) d0, d1, d2 = get_XC3S1200E(chain), get_XC3S1200E(chain), \ get_XC3S1200E(chain) chain._hasinit = True chain._devices = [d0, d1, d2] chain.jtag_enable() d0.run_instruction("CFG_IN", data=bitarray('11010001')) d1.run_instruction("CFG_IN", data=bitarray('01101010111')) d2.run_instruction("CFG_IN",data=bitarray('11110')) chain.flush() assert "110100010110101011111110" not in dev0.\ event_history, "All data written into the first black "\ "hole register. Black Holes not avoided." #The extra zero in the arary are from shifting in the first #bits. Some of these zeros may go away if unnecessary trailing #bypass data is later skipped. assert "11010001" in dev0.DRs[None].dumpData().to01() assert "01101010111" in dev1.DRs[None].dumpData().to01() assert "11110" in dev2.DRs[None].dumpData().to01() def test_black_hole_register_constraints_complimentary_prims(): #Tests if a Blask Hole Read, a Black Hole Write, and a nocare #write are combined in a way that satisfies all requests. The #expected behavior is to combine these three non colliding prims #into a single frame, but the frame state is not being tested #here... just the results in the regs. dev0 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D0", status=bitarray('111100')) dev1 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D1", status=bitarray('111101')) dev2 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D2", status=bitarray('111110')) usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2)) chain = JTAGScanChain(getDriverInstanceForDevice(usbdev)) d0, d1, d2 = get_XC3S1200E(chain), get_XC3S1200E(chain), \ get_XC3S1
200E(chain) chain._hasinit = True chain._devices = [d0, d1, d2] chain.jtag_enable() d0.run_instruction("CFG_IN", data=bitarray('11010001')) d1.run_instruction("BYPASS", data=NoCareBitarray(1)) a, _ = d2.run_instruction("CFG_IN", read=True, bitcount=8) chain.flush() assert a() == bitarray('00000000') assert "1101000100" in dev0.DRs[None].dumpData().to01() XC3S1200E_ID = bitarray('00000001110000101110
000010010011') def get_XC3S1200E(chain): return chain.initialize_device_from_id(chain, XC3S1200E_ID) def test_black_hole_register_constraints_bad_order_complimentary_prims(): #Tests if a Blask Hole Read, a Black Hole Write, and a nocare #write are combined in a way that satisfies all requests. The #expected behavior is to combine these three non colliding prims #into a single frame, but the frame state is not being tested #here... just the results in the regs. dev0 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D0", status=bitarray('111100')) dev1 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D1", status=bitarray('111101')) dev2 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID, name="D2", status=bitarray('111110')) usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2)) chain = JTAGScanChain(getDriverInstanceForDevice(usbdev)) d0, d1, d2 = get_XC3S1200E(chain), get_XC3S1200E(chain), \ get_XC3S1200E(chain) chain._hasinit = True chain._devices = [d0, d1, d2] chain.jtag_enable() d2.run_instruction("CFG_IN", data=bitarray('11010001')) d1.run_instruction("BYPASS", data=NoCareBitarray(1)) a, _ = d1.run_instruction("CFG_IN", read=True, bitcount=8) chain.flush() assert a() == bitarray('00000000') assert "1101000100" in dev2.DRs[None].dumpData().to01()