Dataset Viewer

| Column | Type | Min | Max | Nullable |
|---|---|---|---|---|
| hexsha | string (length) | 40 | 40 | no |
| size | int64 | 6 | 1.04M | no |
| ext | string (10 classes) | | | no |
| lang | string (1 class) | | | no |
| max_stars_repo_path | string (length) | 4 | 247 | no |
| max_stars_repo_name | string (length) | 4 | 130 | no |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 | no |
| max_stars_repo_licenses | sequence (length) | 1 | 10 | no |
| max_stars_count | int64 | 1 | 368k | yes |
| max_stars_repo_stars_event_min_datetime | string (length) | 24 | 24 | yes |
| max_stars_repo_stars_event_max_datetime | string (length) | 24 | 24 | yes |
| max_issues_repo_path | string (length) | 4 | 247 | no |
| max_issues_repo_name | string (length) | 4 | 130 | no |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 | no |
| max_issues_repo_licenses | sequence (length) | 1 | 10 | no |
| max_issues_count | int64 | 1 | 116k | yes |
| max_issues_repo_issues_event_min_datetime | string (length) | 24 | 24 | yes |
| max_issues_repo_issues_event_max_datetime | string (length) | 24 | 24 | yes |
| max_forks_repo_path | string (length) | 4 | 247 | no |
| max_forks_repo_name | string (length) | 4 | 130 | no |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 | no |
| max_forks_repo_licenses | sequence (length) | 1 | 10 | no |
| max_forks_count | int64 | 1 | 105k | yes |
| max_forks_repo_forks_event_min_datetime | string (length) | 24 | 24 | yes |
| max_forks_repo_forks_event_max_datetime | string (length) | 24 | 24 | yes |
| content | string (length) | 1 | 1.04M | no |
| avg_line_length | float64 | 1.53 | 618k | no |
| max_line_length | int64 | 1 | 1.02M | no |
| alphanum_fraction | float64 | 0 | 1 | no |
| original_content | string (length) | 6 | 1.04M | no |
| filtered:remove_non_ascii | int64 | 0 | 538k | no |
| filtered:remove_decorators | int64 | 0 | 917k | no |
| filtered:remove_async | int64 | 0 | 722k | no |
| filtered:remove_classes | int64 | -45 | 1M | no |
| filtered:remove_generators | int64 | 0 | 814k | no |
| filtered:remove_function_no_docstring | int64 | -102 | 850k | no |
| filtered:remove_class_no_docstring | int64 | -3 | 5.46k | no |
| filtered:remove_unused_imports | int64 | -1,350 | 52.4k | no |
| filtered:remove_delete_markers | int64 | 0 | 59.6k | no |
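
For anyone who wants to inspect these records outside the viewer, the snippet below is a minimal sketch using the Hugging Face `datasets` library in streaming mode. The repo id `user/filtered-python-code` is a placeholder assumption, since the dataset's hub id is not shown in this excerpt.

```python
# Minimal sketch: stream one record and inspect the columns listed above.
# NOTE: "user/filtered-python-code" is a HYPOTHETICAL dataset id -- replace it
# with the real hub id of this dataset.
from datasets import load_dataset

ds = load_dataset("user/filtered-python-code", split="train", streaming=True)

row = next(iter(ds))
print(sorted(row.keys()))                  # the 39 columns from the schema table
print(row["max_stars_repo_path"])          # e.g. "VisualGimp/Markup.py"
print(len(row["original_content"]) - len(row["content"]))  # size difference between the two code columns
```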
hexsha: e7e9653d546ade6c8ce9b53c49b25b1b21568a5c | size: 5,267 | ext: py | lang: Python
max_stars: VisualGimp/Markup.py | duangsuse/VisualGimp | 79776fded12595ab3c56855b5ae56e2242780b2e | ["MIT"] | count: 2 | 2019-05-07T12:09:11.000Z → 2019-05-08T09:31:44.000Z
max_issues: VisualGimp/Markup.py | duangsuse-valid-projects/VisualGimp | 79776fded12595ab3c56855b5ae56e2242780b2e | ["MIT"] | count: null | null → null
max_forks: VisualGimp/Markup.py | duangsuse-valid-projects/VisualGimp | 79776fded12595ab3c56855b5ae56e2242780b2e | ["MIT"] | count: null | null → null
content:
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-
# Gimp Markup Builder
# author: duangsuse
# date: Thu May 02 2019 CST
avg_line_length: 24.962085 | max_line_length: 106 | alphanum_fraction: 0.584204
original_content:
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-
# Gimp Markup Builder
# author: duangsuse
# date: Thu May 02 2019 CST
from os import linesep
from Util import stream_join
class MarkupBuilder:
''' Gimp Markup SGML builder '''
def __init__(self, indent = -1, nl = linesep, buffer = str):
self.marks = buffer()
self.tag_stack = list()
self.nl = nl
self.indent = indent
self.last_spaces = 0
self.revert_last_indent_size = 0
self.last_is_text = False
'''
Indent rules:
when starting new tag, write last spaces, last spaces += indent
if new tag is not text tag start (inner is just text), write newline
when leaving tag, last spaces -= indent
'''
def useindent(self): return self.indent != -1
indented = property(useindent)
def wnewline(self):
''' see use_indent'''
self.marks += self.nl
def windent(self):
''' see use_indent'''
wrote = 0
for _ in range(0, self.last_spaces):
self.marks += ' '
wrote += 1 # dummy?
return wrote
def cancel_indent(self):
''' cancel last indent '''
if self.indented: self.marks = self.marks[:-self.revert_last_indent_size]
def do_indent(self, entering = True):
''' Write indent, increase last_spaces, saving wrote spaces and newline to revert_last_indent_size '''
def do():
self.wnewline()
if (entering):
self.last_spaces += self.indent
else: self.last_spaces -= self.indent
self.revert_last_indent_size = self.windent() +1
if self.indented: do()
def do_last_indent(self, *args, **kwargs):
''' write indenting for last block '''
self.last_spaces -= self.indent
self.do_indent(*args, **kwargs)
self.last_spaces += self.indent
def begin(self, tag, attrs = {}):
'''
Make a tag with name and attributes
Attribute name, value and tag name is escaped
'''
self.last_is_text = False
attrst = str()
tagscape = self.escape(tag)
ary = list(stream_join(attrs.keys(), attrs.values())) if attrs.__class__ is dict else list(attrs)
if len(attrs) != 0:
for n in range(0, len(ary), 2):
attrst += self.escape(str(ary[n]))
attrst += '='
#print(ary)
#print(n)
attrst += "\"%s\"" % self.escape(str(ary[n+1]))
self.marks += '<' + tagscape
if len(attrs) != 0: self.marks += ' '
self.marks += attrst + '>'
# always write indents for next line
# makes its possible to drop last indent (text tag)
self.do_indent()
self.tag_stack.append(tagscape)
return self
def make(self): return self.marks
def tag(self, *args, **kwargs):
r'''
EDSL using __close__ with syntax
create nodes like:
with xml.tag('span', {color: '#66ccff'}):
xml % 'Q \w\ Q'
'''
self.last_is_text = False
class TagBuilder:
def __init__(self, xml):
self.xml = xml
def __enter__(self):
self.xml.begin(*args, attrs = kwargs)
def __exit__(self, *lveinfo):
self.xml.end()
return TagBuilder(self)
def text(self, content):
''' append text content '''
self.last_is_text = True
if self.indented: self.cancel_indent()
self.marks += self.escape(content)
return self
#@staticmethod
#def test():
# m = MarkupBuilder()
# m > 'html'
# m > 'head'
# m > 'title'
# m < 'Hello World'
# m <= 2
# m > 'body'
# m > 'text'
# with m.tag("b"):
# m < 'String'
# m >= ['a', {'id': 'str'}]
# m < '|sg.'
# m <= 4
# return m
def end(self):
''' delimites last tag '''
if not self.last_is_text: # cancel indentation
#print(self.indent, self.tag_stack)
self.cancel_indent()
self.do_indent(False)
self.marks += '</' + self.tag_stack.pop() + '>'
self.do_indent(False)
self.last_is_text = False
# Not cared by Markup indent emitter
def raw(self, raw):
''' write raw text (unescaped) '''
self.marks += raw
return self
def rawtag(self, rawtext):
''' append unescaped raw <> text '''
self.marks += '<'
self.marks += rawtext
self.marks += '>'
def _escape(self, xml):
'''
Escape XML string
' is replaced with '
" is replaced with "
& is replaced with &
< is replaced with <
> is replaced with >
'''
escapes = frozenset("'\"&<>")
replacement = { '\'': 'apos', '"': 'quot', '&': 'amp', '<': 'lt', '>': 'gt' }
if len(xml) < 1: return
output = str()
for i in range(0, len(xml)):
char = xml[i]
if (char in escapes):
output += '&'
output += replacement[char]
output += ';'
else: output += char
return output
escape = classmethod(_escape)
def __str__(self):
''' M(marks)..[tag stack] '''
return 'M(' + self.marks + ')..' + str(self.tag_stack)
__lt__ = text # chain
__gt__ = begin # chain
__add__ = raw # chain
def __contains__(self, tag):
''' is tag inside enclosing tags ? '''
return tag in self.tag_stack
def __ge__(self, tag_attr):
''' xml >= ['markup', {'name': 'abcs'}] '''
mark = tag_attr[0]
attr = tag_attr[1]
self.begin(mark, attr)
def __le__(self, n = 1):
''' Leave (close) N tags '''
while n > 0:
self.end()
n -= 1
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 5,069 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 8 | remove_delete_markers 69

hexsha: 8796a12ade2e6974f6dfc98adc77e755604d7da8 | size: 895 | ext: py | lang: Python
max_stars: sqlalchemy_redshift/__init__.py | Hivestack/sqlalchemy-redshift | 6226ffe4c6f3583606016492641e1bd5d351933a | ["MIT"] | count: null | null → null
max_issues: sqlalchemy_redshift/__init__.py | Hivestack/sqlalchemy-redshift | 6226ffe4c6f3583606016492641e1bd5d351933a | ["MIT"] | count: null | null → null
max_forks: sqlalchemy_redshift/__init__.py | Hivestack/sqlalchemy-redshift | 6226ffe4c6f3583606016492641e1bd5d351933a | ["MIT"] | count: null | null → null
content:
from pkg_resources import DistributionNotFound, get_distribution, parse_version
try:
except ImportError:
raise ImportError(
'No module named psycopg2. Please install either '
'psycopg2 or psycopg2-binary package for CPython '
'or psycopg2cffi for Pypy.'
)
for package in ['psycopg2', 'psycopg2-binary', 'psycopg2cffi']:
try:
if get_distribution(package).parsed_version < parse_version('2.5'):
raise ImportError('Minimum required version for psycopg2 is 2.5')
break
except DistributionNotFound:
pass
__version__ = get_distribution('hs-sqlalchemy-redshift').version
from sqlalchemy.dialects import registry
registry.register("redshift", "sqlalchemy_redshift.dialect", "RedshiftDialect")
registry.register(
"redshift.psycopg2", "sqlalchemy_redshift.dialect", "RedshiftDialect"
)
avg_line_length: 31.964286 | max_line_length: 79 | alphanum_fraction: 0.727374
original_content:
from pkg_resources import DistributionNotFound, get_distribution, parse_version
try:
import psycopg2 # noqa: F401
except ImportError:
raise ImportError(
'No module named psycopg2. Please install either '
'psycopg2 or psycopg2-binary package for CPython '
'or psycopg2cffi for Pypy.'
)
for package in ['psycopg2', 'psycopg2-binary', 'psycopg2cffi']:
try:
if get_distribution(package).parsed_version < parse_version('2.5'):
raise ImportError('Minimum required version for psycopg2 is 2.5')
break
except DistributionNotFound:
pass
__version__ = get_distribution('hs-sqlalchemy-redshift').version
from sqlalchemy.dialects import registry
registry.register("redshift", "sqlalchemy_redshift.dialect", "RedshiftDialect")
registry.register(
"redshift.psycopg2", "sqlalchemy_redshift.dialect", "RedshiftDialect"
)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports -6 | remove_delete_markers 40

hexsha: fdbf1c941811766f3c215aa9700b09effe98e5e6 | size: 134 | ext: py | lang: Python
max_stars: ch2/chapter2_features_of_fastapi_02.py | PacktPublishing/Understanding-How-Web-APIs-Work | 63220e7bf6b31315c46650e45c670ca9a01011fc | ["MIT"] | count: 2 | 2021-10-03T09:34:34.000Z → 2021-10-04T04:52:48.000Z
max_issues: ch2/chapter2_features_of_fastapi_02.py | PacktPublishing/Understanding-How-Web-APIs-Work | 63220e7bf6b31315c46650e45c670ca9a01011fc | ["MIT"] | count: 1 | 2021-04-25T05:57:34.000Z → 2021-04-25T14:49:24.000Z
max_forks: ch2/chapter2_features_of_fastapi_02.py | PacktPublishing/Understanding-How-Web-APIs-Work | 63220e7bf6b31315c46650e45c670ca9a01011fc | ["MIT"] | count: 3 | 2021-05-13T09:39:27.000Z → 2021-06-29T05:51:46.000Z
content:
# -*- coding: utf-8 -*-
avg_line_length: 33.5 | max_line_length: 57 | alphanum_fraction: 0.58209
original_content:
# -*- coding: utf-8 -*-
def message(age: int = 0, name: str = 'stranger') -> str:
return f'Hello {name}, you are {age} years old'
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 88 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 22

hexsha: 515654029ae48e70e4487c739d107ea440403f1d | size: 8,124 | ext: py | lang: Python
max_stars: Lib/site-packages/hackedit/app/templates.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | count: null | null → null
max_issues: Lib/site-packages/hackedit/app/templates.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | count: 20 | 2021-05-03T18:02:23.000Z → 2022-03-12T12:01:04.000Z
max_forks: Lib/site-packages/hackedit/app/templates.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | count: null | null → null
content:
"""
This module contains the top level API for managing the project/file templates.
"""
import json
import os
from hackedit.app import settings
def create(template, dest_dir, answers):
"""
Creates a file/project from the specified template, at the specified directory.
:param template: Template data.
:param dest_dir: Destination directory where to create the file/project
:param answers: Dict of answers for substitution variables
"""
ret_val = []
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
src_dir = template['path']
for root, dirs, files in os.walk(src_dir):
for file in files:
if file == 'template.json' or file.endswith('.pyc'):
continue
src, dst = get_paths(root, file, src_dir, dest_dir)
dst = subsitute_vars(dst)
encoding = get_file_encoding(src)
try:
content = open_file(src, encoding)
except OSError:
_logger().exception('failed to open file: %r', src)
if encoding != 'binary':
content = subsitute_vars(content)
if file == 'btpad_btn_img_0.png':
print(len(content), encoding)
try:
open_file(dst, encoding, to_write=content)
except PermissionError:
_logger().exception('failed to write file: %r', dst)
else:
ret_val.append(dst)
assert open_file(dst, encoding) == content
for directory in dirs:
src, dst = get_paths(root, directory, src_dir, dest_dir)
dst = subsitute_vars(dst)
try:
os.mkdir(dst)
except PermissionError:
_logger().exception('failed to create directory: %r', dst)
return ret_val
def get_sources():
"""
Returns the template sources (directory associated with a label).
"""
s = settings.load()
tmpl_sources = s.value('_templates/sources', '[]')
tmpl_sources = json.loads(tmpl_sources)
return sorted(tmpl_sources, key=lambda x: x['label'])
def add_source(label, path):
"""
Adds a template source
:param label: Name of the template source.
:param path: Path of the template source.
"""
tmpl_sources = get_sources()
tmpl_sources.append({'label': label, 'path': path})
s = settings.load()
s.setValue('_templates/sources', json.dumps(tmpl_sources))
def rm_source(label):
"""
Removes the specified template source.
:param label: Name of the template source to remove.
"""
tmpl_sources = get_sources()
for src in tmpl_sources:
if src['label'] == label:
tmpl_sources.remove(src)
s = settings.load()
s.setValue('_templates/sources', json.dumps(tmpl_sources))
def clear_sources():
"""
Clear template sources.
"""
s = settings.load()
s.setValue('_templates/sources', json.dumps([]))
def get_template(source, template):
"""
Returns the specified template data.
"""
for t in get_templates(source_filter=source):
if t['name'] == template:
return t
return None
if __name__ == '__main__':
clear_sources()
add_source('COBOL', '/home/colin/Documents/hackedit-cobol/hackedit_cobol/templates')
add_source('Python', '/home/colin/Documents/hackedit-python/hackedit_python/templates')
for tmpl in get_templates():
print(json.dumps(tmpl, indent=4, sort_keys=True))
avg_line_length: 31.126437 | max_line_length: 100 | alphanum_fraction: 0.563269
original_content:
"""
This module contains the top level API for managing the project/file templates.
"""
import json
import logging
import os
import re
from binaryornot.check import is_binary
from hackedit.app import settings
def create(template, dest_dir, answers):
"""
Creates a file/project from the specified template, at the specified directory.
:param template: Template data.
:param dest_dir: Destination directory where to create the file/project
:param answers: Dict of answers for substitution variables
"""
def get_paths(root, path, src_dir, dest_dir):
src_path = os.path.join(root, path)
rel_path = os.path.relpath(src_path, src_dir)
dst_path = os.path.join(dest_dir, rel_path)
return src_path, dst_path
def get_file_encoding(path):
if is_binary(path):
return 'binary'
try:
encodings = template['encodings']
except KeyError:
encodings = ['utf-8', 'cp1252']
for encoding in encodings:
try:
with open(path, encoding=encoding) as f:
f.read()
except UnicodeDecodeError:
continue
else:
return encoding
def open_file(path, encoding, to_write=None):
if encoding == 'binary':
if to_write is None:
mode = 'rb'
else:
mode = 'wb'
encoding = None
else:
if to_write is None:
mode = 'r'
else:
mode = 'w'
content = None
with open(path, mode, encoding=encoding) as f:
if to_write is not None:
f.write(to_write)
else:
content = f.read()
return content
def subsitute_vars(string):
for var, value in answers.items():
string = re.sub('@%s@' % var, value, string)
return string
ret_val = []
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
src_dir = template['path']
for root, dirs, files in os.walk(src_dir):
for file in files:
if file == 'template.json' or file.endswith('.pyc'):
continue
src, dst = get_paths(root, file, src_dir, dest_dir)
dst = subsitute_vars(dst)
encoding = get_file_encoding(src)
try:
content = open_file(src, encoding)
except OSError:
_logger().exception('failed to open file: %r', src)
if encoding != 'binary':
content = subsitute_vars(content)
if file == 'btpad_btn_img_0.png':
print(len(content), encoding)
try:
open_file(dst, encoding, to_write=content)
except PermissionError:
_logger().exception('failed to write file: %r', dst)
else:
ret_val.append(dst)
assert open_file(dst, encoding) == content
for directory in dirs:
src, dst = get_paths(root, directory, src_dir, dest_dir)
dst = subsitute_vars(dst)
try:
os.mkdir(dst)
except PermissionError:
_logger().exception('failed to create directory: %r', dst)
return ret_val
def get_sources():
"""
Returns the template sources (directory associated with a label).
"""
s = settings.load()
tmpl_sources = s.value('_templates/sources', '[]')
tmpl_sources = json.loads(tmpl_sources)
return sorted(tmpl_sources, key=lambda x: x['label'])
def add_source(label, path):
"""
Adds a template source
:param label: Name of the template source.
:param path: Path of the template source.
"""
tmpl_sources = get_sources()
tmpl_sources.append({'label': label, 'path': path})
s = settings.load()
s.setValue('_templates/sources', json.dumps(tmpl_sources))
def rm_source(label):
"""
Removes the specified template source.
:param label: Name of the template source to remove.
"""
tmpl_sources = get_sources()
for src in tmpl_sources:
if src['label'] == label:
tmpl_sources.remove(src)
s = settings.load()
s.setValue('_templates/sources', json.dumps(tmpl_sources))
def clear_sources():
"""
Clear template sources.
"""
s = settings.load()
s.setValue('_templates/sources', json.dumps([]))
def get_templates(category='', source_filter=''):
"""
Gets the list of templates.
:param category: Template category to retrieve.
- use "Project" to get project templates
- use "File" to get file templates
- use an empty string to retrieve them all (default).
:param source: Label of the source of the templates to retrieve. Use an empty string to retrieve
templates from all sources.
"""
def filtered_sources():
"""
Filter list of sources based on the ``source`` parameter.
"""
tmpl_sources = get_sources()
filtered = []
if source_filter:
# only keep the requested template source
for src in tmpl_sources:
if src['label'] == source_filter:
filtered.append(src)
break
else:
filtered = tmpl_sources
return filtered
def get_template(tdir):
"""
Returns template data for the given template directory.
Returns None if the template is invalid.
:param tdir: Template directory to get data from.
"""
tmpl = None
template_json = os.path.join(tdir, 'template.json')
if not os.path.exists(template_json):
# no template.json -> invalid template
_logger().warn('"template.json" not found in template directory: %r', tdir)
else:
try:
with open(template_json) as f:
tmpl = json.loads(f.read())
except (OSError, json.JSONDecodeError):
# unreadable template.json -> invalid template
_logger().exception('failed to read %r', template_json)
tmpl = None
else:
try:
tmpl_cat = tmpl['category']
except KeyError:
# no metadata or no category in template.json -> invalid template
_logger().exception('failed to read category from template metadata, '
'incomplete template.json?')
tmpl = None
else:
# valid template (finally).
tmpl['source'] = src
if category and category != tmpl_cat:
_logger().debug('rejecting template directory: %r, invalid category', tdir)
tmpl = None
return tmpl
def listdir(directory):
"""
Securely list subdirectories of ``directory``.
Returns an empty list of an OSError occurred.
"""
try:
return os.listdir(directory)
except OSError:
return []
for src in filtered_sources():
for tdir in listdir(src['path']):
tdir = os.path.join(src['path'], tdir)
if os.path.isfile(tdir):
continue
tmpl = get_template(tdir)
if tmpl:
tmpl['path'] = tdir
yield tmpl
def get_template(source, template):
"""
Returns the specified template data.
"""
for t in get_templates(source_filter=source):
if t['name'] == template:
return t
return None
def _logger():
return logging.getLogger(__name__)
if __name__ == '__main__':
clear_sources()
add_source('COBOL', '/home/colin/Documents/hackedit-cobol/hackedit_cobol/templates')
add_source('Python', '/home/colin/Documents/hackedit-python/hackedit_python/templates')
for tmpl in get_templates():
print(json.dumps(tmpl, indent=4, sort_keys=True))
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 3,037 | remove_function_no_docstring 1,348 | remove_class_no_docstring 0 | remove_unused_imports -1 | remove_delete_markers 220

hexsha: 1a60970d1a4cf3ecc7aacdd16b38eca549a34840 | size: 1,845 | ext: py | lang: Python
max_stars: src/tubize/videotomp4.py | olivervinn/tubizescripts | 8756f322d3e31f76f8b77cb8e084ded5941e29fa | ["MIT"] | count: null | null → null
max_issues: src/tubize/videotomp4.py | olivervinn/tubizescripts | 8756f322d3e31f76f8b77cb8e084ded5941e29fa | ["MIT"] | count: null | null → null
max_forks: src/tubize/videotomp4.py | olivervinn/tubizescripts | 8756f322d3e31f76f8b77cb8e084ded5941e29fa | ["MIT"] | count: null | null → null
content:
"""
Convert video format x to MP4/H.264.
"""
import logging
logger = logging.getLogger(__name__)
avg_line_length: 32.368421 | max_line_length: 107 | alphanum_fraction: 0.571816
original_content:
"""
Convert video format x to MP4/H.264.
"""
import os
import sys
import logging
from .videometainfo import VideoMetaInfo
from .utils import sizeof_fmt, time_fmt, find_files, check_dependencies, call, ffmpeg
logger = logging.getLogger(__name__)
class VideoToMP4:
"""To Mp4"""
SUPPORTED_EXTENSIONS = ".wmv, .avi, .mkv, .mov, .flv"
RULES = {
".wmv": "-c:v libx264 -crf 19 ",
".avi":
"-vf yadif=1 -c:v h264_nvenc -preset slow -tune film -crf 17",
".mkv": "-c copy",
".mov": "-vcodec h264 -acodec aac -strict -2 -crf 19 ",
".flv": " -r 20 ",
}
def process(self, video_file: str):
"""Convert video files to MP4 container format."""
name = os.path.splitext(video_file)[0]
ext = os.path.splitext(video_file)[1]
new_name = f"{name}.mp4"
if os.path.exists(new_name):
logger.info(f"Skipping file {new_name} already exists!")
elif ext not in VideoToMP4.RULES:
logger.error(f"Skipping unsupported type {ext}!")
else:
print(f'Convert {ext} to MP4 {new_name} ... ')
meta_info = VideoMetaInfo(video_file)
rule = VideoToMP4.RULES[ext]
flags = "-movflags +faststart -pix_fmt yuv420p"
ffmpeg(
f'-i "{video_file}" {flags} {rule} -metadata date="{meta_info.original_date}" "{new_name}"'
)
def file(self, filename: str) -> None:
logger.debug(f"converting file {filename}")
self.process(filename)
def directory(self, path: str, extension: str) -> int:
files = find_files(path, extension)
if len(files) < 1:
print("No matching files found in directory!", file=sys.stderr)
else:
for f in files:
self.file(f)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 1,573 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 60 | remove_delete_markers 113

hexsha: 8052d0446907259540de210ff2c92410c7342f2e | size: 117 | ext: py | lang: Python
max_stars: setup.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | ["MIT"] | count: 66 | 2019-01-07T23:59:26.000Z → 2021-12-29T16:51:56.000Z
max_issues: setup.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | ["MIT"] | count: 8 | 2019-01-09T01:35:54.000Z → 2021-08-23T20:05:03.000Z
max_forks: setup.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | ["MIT"] | count: 21 | 2019-03-26T01:02:33.000Z → 2022-01-26T20:34:34.000Z
content:
from setuptools import setup
setup(
name='parasol',
dependency_links=[
],
install_requires=[
]
)
avg_line_length: 13 | max_line_length: 28 | alphanum_fraction: 0.623932
original_content:
from setuptools import setup
setup(
name='parasol',
dependency_links=[
],
install_requires=[
]
)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0

hexsha: 79299c770a188b579e6412af89f2263960e65f50 | size: 568 | ext: py | lang: Python
max_stars: app/migrations/0007_auto_20211102_1946.py | Rqwannn/Rudemy | fe2d84540f3cc64c0ff6821e5f2fac22675fd381 | ["MIT"] | count: 3 | 2021-12-27T06:16:26.000Z → 2022-01-20T02:13:03.000Z
max_issues: app/migrations/0007_auto_20211102_1946.py | Rqwannn/Rudemy | fe2d84540f3cc64c0ff6821e5f2fac22675fd381 | ["MIT"] | count: null | null → null
max_forks: app/migrations/0007_auto_20211102_1946.py | Rqwannn/Rudemy | fe2d84540f3cc64c0ff6821e5f2fac22675fd381 | ["MIT"] | count: null | null → null
content:
# Generated by Django 3.2.8 on 2021-11-02 12:46
avg_line_length: 21.846154 | max_line_length: 67 | alphanum_fraction: 0.549296
original_content:
# Generated by Django 3.2.8 on 2021-11-02 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20211102_1928'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='skill',
),
migrations.AddField(
model_name='profile',
name='tags',
field=models.ManyToManyField(blank=True, to='app.Tag'),
),
migrations.DeleteModel(
name='Skill',
),
]
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 454 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 19 | remove_delete_markers 46

hexsha: 752ee840202809a32e9848a1a2c9a1828e74e71c | size: 5,132 | ext: py | lang: Python
max_stars: oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | ["BSD-3-Clause"] | count: null | null → null
max_issues: oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | ["BSD-3-Clause"] | count: null | null → null
max_forks: oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | ["BSD-3-Clause"] | count: null | null → null
content:
import csv
import io
import json
import warnings
from collections import defaultdict
from ..utils.exceptions import OasisException
def _get_summaries(summary_file):
"""
Get a list representation of a summary file.
"""
summaries_dict = defaultdict(lambda: {'leccalc': {}})
with io.open(summary_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
id = int(row[0])
if row[1].startswith('leccalc'):
summaries_dict[id]['leccalc'][row[1]] = row[2].lower() == 'true'
else:
summaries_dict[id][row[1]] = row[2].lower() == 'true'
summaries = list()
for id in sorted(summaries_dict):
summaries_dict[id]['id'] = id
summaries.append(summaries_dict[id])
return summaries
def read_analysis_settings(analysis_settings_fp, il_files_exist=False,
ri_files_exist=False):
"""Read the analysis settings file"""
# Load analysis_settings file
try:
# Load as a json
with io.open(analysis_settings_fp, 'r', encoding='utf-8') as f:
analysis_settings = json.load(f)
# Extract the analysis_settings part within the json
if analysis_settings.get('analysis_settings'):
analysis_settings = analysis_settings['analysis_settings']
except (IOError, TypeError, ValueError):
raise OasisException('Invalid analysis settings file or file path: {}.'.format(
analysis_settings_fp))
# Reset il_output if the files are not there
if not il_files_exist or 'il_output' not in analysis_settings:
# No insured loss output
analysis_settings['il_output'] = False
analysis_settings['il_summaries'] = []
# Same for ri_output
if not ri_files_exist or 'ri_output' not in analysis_settings:
# No reinsured loss output
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
# If we want ri_output, we will need il_output, which needs il_files
if analysis_settings['ri_output'] and not analysis_settings['il_output']:
if not il_files_exist:
warnings.warn("ri_output selected, but il files not found")
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
else:
analysis_settings['il_output'] = True
# guard - Check if at least one output type is selected
if not any([
analysis_settings['gul_output'] if 'gul_output' in analysis_settings else False,
analysis_settings['il_output'] if 'il_output' in analysis_settings else False,
analysis_settings['ri_output'] if 'ri_output' in analysis_settings else False,
]):
raise OasisException(
'No valid output settings in: {}'.format(analysis_settings_fp))
return analysis_settings
avg_line_length: 36.657143 | max_line_length: 100 | alphanum_fraction: 0.677319
original_content:
import csv
import io
import json
import logging
import os
import warnings
from collections import defaultdict
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from .files import GENERAL_SETTINGS_FILE, GUL_SUMMARIES_FILE, IL_SUMMARIES_FILE, MODEL_SETTINGS_FILE
def _get_summaries(summary_file):
"""
Get a list representation of a summary file.
"""
summaries_dict = defaultdict(lambda: {'leccalc': {}})
with io.open(summary_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
id = int(row[0])
if row[1].startswith('leccalc'):
summaries_dict[id]['leccalc'][row[1]] = row[2].lower() == 'true'
else:
summaries_dict[id][row[1]] = row[2].lower() == 'true'
summaries = list()
for id in sorted(summaries_dict):
summaries_dict[id]['id'] = id
summaries.append(summaries_dict[id])
return summaries
@oasis_log
def create_analysis_settings_json(directory):
"""
Generate an analysis settings JSON from a set of
CSV files in a specified directory.
Args:
``directory`` (string): the directory containing the CSV files.
Returns:
The analysis settings JSON.
"""
if not os.path.exists(directory):
error_message = "Directory does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings_file = os.path.join(directory, GENERAL_SETTINGS_FILE)
model_settings_file = os.path.join(directory, MODEL_SETTINGS_FILE)
gul_summaries_file = os.path.join(directory, GUL_SUMMARIES_FILE)
il_summaries_file = os.path.join(directory, IL_SUMMARIES_FILE)
for file in [general_settings_file, model_settings_file, gul_summaries_file, il_summaries_file]:
if not os.path.exists(file):
error_message = "File does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings = dict()
with io.open(general_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
general_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
model_settings = dict()
with io.open(model_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
model_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
gul_summaries = _get_summaries(gul_summaries_file)
il_summaries = _get_summaries(il_summaries_file)
analysis_settings = general_settings
analysis_settings['model_settings'] = model_settings
analysis_settings['gul_summaries'] = gul_summaries
analysis_settings['il_summaries'] = il_summaries
output_json = json.dumps(analysis_settings)
logging.getLogger().info("Analysis settings json: {}".format(output_json))
return output_json
def read_analysis_settings(analysis_settings_fp, il_files_exist=False,
ri_files_exist=False):
"""Read the analysis settings file"""
# Load analysis_settings file
try:
# Load as a json
with io.open(analysis_settings_fp, 'r', encoding='utf-8') as f:
analysis_settings = json.load(f)
# Extract the analysis_settings part within the json
if analysis_settings.get('analysis_settings'):
analysis_settings = analysis_settings['analysis_settings']
except (IOError, TypeError, ValueError):
raise OasisException('Invalid analysis settings file or file path: {}.'.format(
analysis_settings_fp))
# Reset il_output if the files are not there
if not il_files_exist or 'il_output' not in analysis_settings:
# No insured loss output
analysis_settings['il_output'] = False
analysis_settings['il_summaries'] = []
# Same for ri_output
if not ri_files_exist or 'ri_output' not in analysis_settings:
# No reinsured loss output
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
# If we want ri_output, we will need il_output, which needs il_files
if analysis_settings['ri_output'] and not analysis_settings['il_output']:
if not il_files_exist:
warnings.warn("ri_output selected, but il files not found")
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
else:
analysis_settings['il_output'] = True
# guard - Check if at least one output type is selected
if not any([
analysis_settings['gul_output'] if 'gul_output' in analysis_settings else False,
analysis_settings['il_output'] if 'il_output' in analysis_settings else False,
analysis_settings['ri_output'] if 'ri_output' in analysis_settings else False,
]):
raise OasisException(
'No valid output settings in: {}'.format(analysis_settings_fp))
return analysis_settings
filtered: remove_non_ascii 0 | remove_decorators 2,033 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 72 | remove_delete_markers 111

hexsha: cb8ea6149e57e707c1ee331f670e37c8feb61914 | size: 6,815 | ext: py | lang: Python
max_stars: codes/functions.py | Wenupi/protoplanetary_disks | 51f8decbec5415e1da9893316f03d32ca5ab27de | ["MIT"] | count: null | null → null
max_issues: codes/functions.py | Wenupi/protoplanetary_disks | 51f8decbec5415e1da9893316f03d32ca5ab27de | ["MIT"] | count: null | null → null
max_forks: codes/functions.py | Wenupi/protoplanetary_disks | 51f8decbec5415e1da9893316f03d32ca5ab27de | ["MIT"] | count: null | null → null
content:
#!/usr/bin/env python
#--------------------------------------------------------------------------------
#Changes the sky coordinates (x,y,z) to the disk coordinates (x_d,y_d,z_d)
#The x axis is the rotation axis
#--------------------------------------------------------------------------------
#Radiative transfer equation
#--------------------------------------------------------------------------------
#Optical depth
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#Black body radiation
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#Lee las tablas de opacidad DSHARP
#Load opacities
with np.load('default_opacities_smooth.npz') as d:
a_w = d['a']
gsca_w = d['g']
lam_w = d['lam']
k_abs_w = d['k_abs']
k_sca_w = d['k_sca']
lam_avgs = wl
# We split the opacities within the range of frequency to make the calculations faster
k_abs_w = k_abs_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:]
k_sca_w = k_sca_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:]
k_sca_w = k_sca_w*(1. - gsca_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:])
lam_w = lam_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w)]
opac_grid = opacity.size_average_opacity(lam_avgs, a_w, lam_w, k_abs_w.T, k_sca_w.T, q=3.5, plot=True)
function_ext = interpolate.interp1d(a_w, opac_grid['ka'][:]+opac_grid['ks'][:],kind='cubic')
function_alb = interpolate.interp1d(a_w, opac_grid['ks'][:]/(opac_grid['ka'][:]+opac_grid['ks'][:]),kind='cubic')
if not scattering:
function_alb = interpolate.interp1d(a_w, np.zeros((np.shape(opac_grid['ks'][:]))),kind='cubic')
avg_line_length: 43.685897 | max_line_length: 134 | alphanum_fraction: 0.501981
original_content:
#!/usr/bin/env python
#--------------------------------------------------------------------------------
#Changes the sky coordinates (x,y,z) to the disk coordinates (x_d,y_d,z_d)
#The x axis is the rotation axis
def FUN_rotation(x,y,z):
x_d = x
y_d = y*np.cos(inc) - z*np.sin(inc)
z_d = y*np.sin(inc) + z*np.cos(inc)
return x_d,y_d,z_d
#--------------------------------------------------------------------------------
#Radiative transfer equation
def FUN_intensity(I,z,x,y,optde):
x_d,y_d,z_d = FUN_rotation(x,y,z)
density = EQ_density(x_d,y_d,z_d)
amax = EQ_amax(x_d,y_d,z_d)
opa = function_ext(amax)
S = funcion_S([z_d,y_d,x_d])
# print ('x,y,z', x,y,z)
# print (S, x_d, y_d, z_d)
# print (optde(z))
dIdz = -S*opa*density*np.exp(-optde(z)) #z es la variable de integracion (debe ser evaluada en cualquier punto)
return dIdz
#--------------------------------------------------------------------------------
#Optical depth
def FUN_tau(tt,z,x,y):
x_d,y_d,z_d = FUN_rotation(x,y,z)
density = EQ_density(x_d,y_d,z_d)
amax = EQ_amax(x_d,y_d,z_d)
opa = function_ext(amax)
dtau = -opa*density
return dtau
#--------------------------------------------------------------------------------
def FUN_tau_zaxis(tt,z,x,y):
x_d,y_d,z_d = x,y,z
density = EQ_density(x_d,y_d,z_d)
amax = EQ_amax(x_d,y_d,z_d)
opa = function_ext(amax)
dtau = -opa*density
return dtau
#--------------------------------------------------------------------------------
#Black body radiation
def FUN_BB(nu,T):
# B = 2.*hP*nu**3/clight**2/( np.exp(hP*nu/kB/T) - 1.)
B = 1./( np.exp(hP*nu/kB/T) - 1.)
return B
#--------------------------------------------------------------------------------
def FUN_limits_mult(xx,yy):
Hout = EQ_Height(Rout)
lim_z = Rout*np.sin(inc) + 2.*Hout*np.cos(inc) #Based on the geometry of the disk
lim_y = Rout*np.cos(inc) + 2.*Hout*np.sin(inc) #Based on the geometry of the disk
z_arr = np.linspace(1.1*lim_z, -1.1*lim_z, 200)
z_crit = []
if ((np.abs(xx) <=Rout) and (np.abs(yy) <= lim_y)):
xd,yd,zd = FUN_rotation(xx,yy,z_arr)
crit = np.zeros((len(z_arr)))
###############################################################################
#Funciona pero podria ser optimizado
###############################################################################
for ii in range(len(z_arr)): #Crea un vector de densidad en la linea de vision
if (EQ_density(xd,yd[ii],zd[ii]) == 0.):
crit[ii] = 0
else:
crit[ii] = 1
for ii in range(len(z_arr)): #Ve los indices donde cambia de 0 a algun valor, o de algun valor a 0 (fronteras)
if ( (ii != 0) and (crit[ii] - crit[ii-1] != 0 )):
z_crit.append(z_arr[ii])
elif(ii == 0 and crit[0] == 1):
z_crit.append(z_arr[0])
###############################################################################
return z_crit
#--------------------------------------------------------------------------------
def FUN_creates_source_function(x_array,y_array):
#Arrays and limits
Hout = EQ_Height(Rout)
z_array = np.linspace(-2.*Hout, 2.*Hout, 200)
Sfunction = np.zeros((len(z_array),len(y_array),len(x_array)))
Temfunction = np.zeros((len(z_array),len(y_array),len(x_array)))
op_depth_p = np.zeros((len(y_array),len(x_array)))
#Computes the optical depth (perpendicular to the disk midplane)
for j in range(len(y_array)):
for i in range(len(x_array)):
if(x_array[i] == 0. and y_array[j] == 0.):
Sfunction[:,j,i] = 0.
Temfunction[:,j,i] = 0.
else:
rad = np.sqrt(x_array[i]**2 + y_array[j]**2)
Hscale = EQ_Height(rad)
z_integ = np.linspace(2.*Hscale,-2.*Hscale,200)
sol = odeint(FUN_tau_zaxis,0.,z_integ,args=(x_array[i],y_array[j])).T[0]
op_depth_p[j][i] = sol[len(z_integ)-1]
inter_opt = interpolate.interp1d(z_integ,sol,kind='linear', bounds_error=False,fill_value=0.)
for k in range(len(z_array)):
amax = EQ_amax(x_array[i],y_array[j],z_array[k])
albedo = function_alb(amax)
##########Temperature##########
Omega2 = Ggrav*Mstar/(rad*AU)**3
Teff4 = 3.*Mdot*Omega2/8./np.pi/sigmaB
Tacc4 = 3./4.*(7.*inter_opt(abs(z_array[k])) + 2./3.)*Teff4
Tirr4 = Tstar**4./4.*(Rstar/rad/AU)**2*np.exp(-7.*inter_opt(abs(z_array[k]))/phi_angle)
Temfunction[k,j,i] = (Tacc4 + Tirr4)**(0.25)
#Temfunction[k,j,i] = EQ_temperature(x_array[i],y_array[j],z_array[k])
###############################
Sfunction[k,j,i] = FUN_BB(nu,Temfunction[k,j,i])*(1.+ albedo*FUN_f(inter_opt(z_array[k]),op_depth_p[j][i],albedo))
#Crea funcion fuente y temperatura en 3D
funcion_S = RegularGridInterpolator((z_array, y_array, x_array), Sfunction,bounds_error=False,fill_value=None)
funcion_T = RegularGridInterpolator((z_array, y_array, x_array), Temfunction,bounds_error=False,fill_value=None)
return funcion_S, funcion_T
#--------------------------------------------------------------------------------
def FUN_f(t,tau,alb):
eps = np.sqrt(1.-alb)
fff = np.exp(-np.sqrt(3.)*eps*t) + np.exp(np.sqrt(3.)*eps*(t-tau))
fff = fff/( np.exp(-np.sqrt(3.)*eps*tau)*(eps-1.) - (eps+1.) )
return fff
#--------------------------------------------------------------------------------
#Lee las tablas de opacidad DSHARP
#Load opacities
with np.load('default_opacities_smooth.npz') as d:
a_w = d['a']
gsca_w = d['g']
lam_w = d['lam']
k_abs_w = d['k_abs']
k_sca_w = d['k_sca']
lam_avgs = wl
# We split the opacities within the range of frequency to make the calculations faster
k_abs_w = k_abs_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:]
k_sca_w = k_sca_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:]
k_sca_w = k_sca_w*(1. - gsca_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:])
lam_w = lam_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w)]
opac_grid = opacity.size_average_opacity(lam_avgs, a_w, lam_w, k_abs_w.T, k_sca_w.T, q=3.5, plot=True)
function_ext = interpolate.interp1d(a_w, opac_grid['ka'][:]+opac_grid['ks'][:],kind='cubic')
function_alb = interpolate.interp1d(a_w, opac_grid['ks'][:]/(opac_grid['ka'][:]+opac_grid['ks'][:]),kind='cubic')
if not scattering:
function_alb = interpolate.interp1d(a_w, np.zeros((np.shape(opac_grid['ks'][:]))),kind='cubic')
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 4,676 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 176

hexsha: d281bf9d519356903906b4ce02f43f84e40f8147 | size: 2,893 | ext: py | lang: Python
max_stars: F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | ["MIT"] | count: 1 | 2021-11-15T19:24:40.000Z → 2021-11-15T19:24:40.000Z
max_issues: F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | ["MIT"] | count: null | null → null
max_forks: F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | ["MIT"] | count: 2 | 2021-11-15T19:23:46.000Z → 2021-11-29T12:42:26.000Z
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 18:06:05 2021
@author: jhask
"""
import csv
import pandas as pd
import numpy as np
import re
import scipy.io as sio
import os
# Map MCM names to TUV labels
j_vals_dict= dict({
'O3 -> O2 + O(1D)':'J1',
'O3 -> O2 + O(3P)':'J2',
'H2O2 -> 2 OH':'J3',
'NO2 -> NO + O(3P)':'J4',
'NO3 -> NO + O2':'J5',
'NO3 -> NO2 + O(3P)':'J6',
'HNO2 -> OH + NO':'J7',
'HNO3 -> OH + NO2':'J8',
'CH2O -> H + HCO':'J11',
'CH2O -> H2 + CO':'J12',
'CH3CHO -> CH3 + HCO':'J13',
'C2H5CHO -> C2H5 + HCO':'J14',
'CH2=C(CH3)CHO -> Products':'J18',
'CH3COCH3 -> CH3CO + CH3':'J21',
'CH3COCH2CH3 -> CH3CO + CH2CH3':'J22',
'CH3COCH=CH2 -> Products':'J23',
'CHOCHO -> H2 + 2CO':'J31',
'CHOCHO -> CH2O + CO':'J32',
'CHOCHO -> HCO + HCO':'J33',
'CH3COCHO -> CH3CO + HCO':'J34',
'CH3COCOCH3 -> Products':'J35',
'CH3OOH -> CH3O + OH':'J41',
'CH3ONO2 -> CH3O + NO2':'J51',
'C2H5ONO2 -> C2H5O + NO2':'J52',
'n-C3H7ONO2 -> C3H7O + NO2':'J53',
'CH3CHONO2CH3 -> CH3CHOCH3 + NO2':'J54',
'C(CH3)3(ONO2) -> C(CH3)3(O.) + NO2':'J55',
'CH3COCH2(ONO2) -> CH3COCH2(O.) + NO2':'J56',
'CH2(OH)COCH3 -> CH3CO + CH2(OH)':'Jn10',
'CH2=CHCHO -> Products':'Jn11',
'CH3CO(OONO2) -> CH3CO(OO) + NO2':'Jn14',
'CH3CO(OONO2) -> CH3CO(O) + NO3':'Jn15',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn16',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn17',
'N2O5 -> NO3 + NO2':'Jn19',
'N2O5 -> NO3 + NO + O(3P)':'Jn20',
'HNO4 -> HO2 + NO2':'Jn21'})
#TUV output file.
file= 'C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/foam_6_29_out.txt'
with open(file, "r",errors="ignore") as f: # read line by line.
reader = csv.reader(f, delimiter="\t")
# Initialize vars we fill in reading the file.
ln_num = 0; map_cols=dict({})
in_species_list=False;
pass_go=False
for row in reader:
line = " ".join(row) # read line by line.
hdrs= [key for key in list(j_vals_dict.keys()) if key in line]
if len(hdrs) > 0 :
headers= re.search(r"[\d]*[\=\w]", line)
print(line, hdrs, j_vals_dict[ hdrs[:][0]])
if headers: map_cols[headers.group()]=j_vals_dict[ hdrs[:][0]]
if (pass_go is True) and ('------' not in line ):
# Append the j-values to the dataframe at this point in time.
splt= [float(item) for item in line.split(" ") if item !='']
df.loc[len(df)]=np.array(splt)
if 'time, hrs. sza, deg.' in line:
pass_go=True
df=pd.DataFrame(columns= ['time', 'sza']+ list(map_cols.values()))
to_mat={name: col.values for name, col in df.items()}
filename= os.path.join('C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/'+'F0AM_tuv.mat')
sio.savemat(filename, to_mat)
print(filename)
avg_line_length: 30.452632 | max_line_length: 105 | alphanum_fraction: 0.5458
original_content:
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 18:06:05 2021
@author: jhask
"""
import csv
import pandas as pd
import numpy as np
import re
import scipy.io as sio
import os
# Map MCM names to TUV labels
j_vals_dict= dict({
'O3 -> O2 + O(1D)':'J1',
'O3 -> O2 + O(3P)':'J2',
'H2O2 -> 2 OH':'J3',
'NO2 -> NO + O(3P)':'J4',
'NO3 -> NO + O2':'J5',
'NO3 -> NO2 + O(3P)':'J6',
'HNO2 -> OH + NO':'J7',
'HNO3 -> OH + NO2':'J8',
'CH2O -> H + HCO':'J11',
'CH2O -> H2 + CO':'J12',
'CH3CHO -> CH3 + HCO':'J13',
'C2H5CHO -> C2H5 + HCO':'J14',
'CH2=C(CH3)CHO -> Products':'J18',
'CH3COCH3 -> CH3CO + CH3':'J21',
'CH3COCH2CH3 -> CH3CO + CH2CH3':'J22',
'CH3COCH=CH2 -> Products':'J23',
'CHOCHO -> H2 + 2CO':'J31',
'CHOCHO -> CH2O + CO':'J32',
'CHOCHO -> HCO + HCO':'J33',
'CH3COCHO -> CH3CO + HCO':'J34',
'CH3COCOCH3 -> Products':'J35',
'CH3OOH -> CH3O + OH':'J41',
'CH3ONO2 -> CH3O + NO2':'J51',
'C2H5ONO2 -> C2H5O + NO2':'J52',
'n-C3H7ONO2 -> C3H7O + NO2':'J53',
'CH3CHONO2CH3 -> CH3CHOCH3 + NO2':'J54',
'C(CH3)3(ONO2) -> C(CH3)3(O.) + NO2':'J55',
'CH3COCH2(ONO2) -> CH3COCH2(O.) + NO2':'J56',
'CH2(OH)COCH3 -> CH3CO + CH2(OH)':'Jn10',
'CH2=CHCHO -> Products':'Jn11',
'CH3CO(OONO2) -> CH3CO(OO) + NO2':'Jn14',
'CH3CO(OONO2) -> CH3CO(O) + NO3':'Jn15',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn16',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn17',
'N2O5 -> NO3 + NO2':'Jn19',
'N2O5 -> NO3 + NO + O(3P)':'Jn20',
'HNO4 -> HO2 + NO2':'Jn21'})
#TUV output file.
file= 'C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/foam_6_29_out.txt'
with open(file, "r",errors="ignore") as f: # read line by line.
reader = csv.reader(f, delimiter="\t")
# Initialize vars we fill in reading the file.
ln_num = 0; map_cols=dict({})
in_species_list=False;
pass_go=False
for row in reader:
line = " ".join(row) # read line by line.
hdrs= [key for key in list(j_vals_dict.keys()) if key in line]
if len(hdrs) > 0 :
headers= re.search(r"[\d]*[\=\w]", line)
print(line, hdrs, j_vals_dict[ hdrs[:][0]])
if headers: map_cols[headers.group()]=j_vals_dict[ hdrs[:][0]]
if (pass_go is True) and ('------' not in line ):
# Append the j-values to the dataframe at this point in time.
splt= [float(item) for item in line.split(" ") if item !='']
df.loc[len(df)]=np.array(splt)
if 'time, hrs. sza, deg.' in line:
pass_go=True
df=pd.DataFrame(columns= ['time', 'sza']+ list(map_cols.values()))
to_mat={name: col.values for name, col in df.items()}
filename= os.path.join('C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/'+'F0AM_tuv.mat')
sio.savemat(filename, to_mat)
print(filename)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0

hexsha: 1d7b25e9a1db4f378a05b7199423917d7b5b9f81 | size: 1,343 | ext: py | lang: Python
max_stars: extract_url.py | nickinack/extract_url | d084ca0a791d5c50ab2accaee7cb4d0b981bd132 | ["MIT"] | count: 2 | 2022-02-07T05:51:36.000Z → 2022-02-07T05:52:11.000Z
max_issues: extract_url.py | nickinack/extract_url | d084ca0a791d5c50ab2accaee7cb4d0b981bd132 | ["MIT"] | count: null | null → null
max_forks: extract_url.py | nickinack/extract_url | d084ca0a791d5c50ab2accaee7cb4d0b981bd132 | ["MIT"] | count: 1 | 2020-05-18T08:29:22.000Z → 2020-05-18T08:29:22.000Z
content:
'''
Imports
'''
import sys as sys
import csv
from collections import defaultdict
import re
'''
URL Extract
'''
columns = defaultdict(list)
with open('SecurityIDRBT.csv') as f:
reader = csv.DictReader(f) # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
columns[k].append(v) # append the value into the appropriate list
url_list = [] # based on column name k
for element in range(len(columns['Body'])):
urls = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', columns['Body'][element])
for url in urls:
url_list.append(url)
'''
Find Unique URLs and filter with semantic search results
'''
url_unique = []
for element in url_list:
if element not in url_unique:
if element not in common_urls_http:
if element not in common_urls_https:
url_unique.append(element)
'''
Write it in a new CSV
'''
with open('url.csv', 'w',newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
for word in url_unique:
wr.writerow([word])
avg_line_length: 29.844444 | max_line_length: 95 | alphanum_fraction: 0.603127
original_content:
'''
Imports
'''
from config import *
from newspaper import Article
import sys as sys
import pandas as pd
import csv
from collections import defaultdict
import re
'''
URL Extract
'''
columns = defaultdict(list)
with open('SecurityIDRBT.csv') as f:
reader = csv.DictReader(f) # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
columns[k].append(v) # append the value into the appropriate list
url_list = [] # based on column name k
for element in range(len(columns['Body'])):
urls = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', columns['Body'][element])
for url in urls:
url_list.append(url)
'''
Find Unique URLs and filter with semantic search results
'''
url_unique = []
for element in url_list:
if element not in url_unique:
if element not in common_urls_http:
if element not in common_urls_https:
url_unique.append(element)
'''
Write it in a new CSV
'''
with open('url.csv', 'w',newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
for word in url_unique:
wr.writerow([word])
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 5 | remove_delete_markers 70

hexsha: 56b682792eb61ccb189ac68b9d7a874cbd6c0a60 | size: 3,279 | ext: py | lang: Python
max_stars: test/python/test_mapper_coupling.py | kifumi/qiskit-terra | 203fca6d694a18824a6b12cbabd3dd2c64dd12ae | ["Apache-2.0"] | count: 1 | 2018-11-01T01:35:43.000Z → 2018-11-01T01:35:43.000Z
max_issues: test/python/test_mapper_coupling.py | a-amaral/qiskit-terra | e73beba1e68de2617046a7e1e9eeac375b61de81 | ["Apache-2.0"] | count: null | null → null
max_forks: test/python/test_mapper_coupling.py | a-amaral/qiskit-terra | e73beba1e68de2617046a7e1e9eeac375b61de81 | ["Apache-2.0"] | count: null | null → null
content:
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring
avg_line_length: 36.433333 | max_line_length: 88 | alphanum_fraction: 0.633425
original_content:
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring
from qiskit.mapper import _coupling
from .common import QiskitTestCase
class CouplingTest(QiskitTestCase):
def test_coupling_dict2list(self):
input_dict = {0: [1, 2], 1: [2]}
result = _coupling.coupling_dict2list(input_dict)
expected = [[0, 1], [0, 2], [1, 2]]
self.assertEqual(expected, result)
def test_coupling_dict2list_empty_dict(self):
self.assertIsNone(_coupling.coupling_dict2list({}))
def test_coupling_list2dict(self):
input_list = [[0, 1], [0, 2], [1, 2]]
result = _coupling.coupling_list2dict(input_list)
expected = {0: [1, 2], 1: [2]}
self.assertEqual(expected, result)
def test_coupling_list2dict_empty_list(self):
self.assertIsNone(_coupling.coupling_list2dict([]))
def test_empty_coupling_class(self):
coupling = _coupling.Coupling()
self.assertEqual(0, coupling.size())
self.assertEqual([], coupling.get_qubits())
self.assertEqual([], coupling.get_edges())
self.assertFalse(coupling.connected())
self.assertEqual("", str(coupling))
def test_coupling_str(self):
coupling_dict = {0: [1, 2], 1: [2]}
coupling = _coupling.Coupling(coupling_dict)
expected = ("qubits: q[0] @ 1, q[1] @ 2, q[2] @ 3\n"
"edges: q[0]-q[1], q[0]-q[2], q[1]-q[2]")
self.assertEqual(expected, str(coupling))
def test_coupling_compute_distance(self):
coupling_dict = {0: [1, 2], 1: [2]}
coupling = _coupling.Coupling(coupling_dict)
self.assertTrue(coupling.connected())
coupling.compute_distance()
qubits = coupling.get_qubits()
result = coupling.distance(qubits[0], qubits[1])
self.assertEqual(1, result)
def test_coupling_compute_distance_coupling_error(self):
coupling = _coupling.Coupling()
self.assertRaises(_coupling.CouplingError, coupling.compute_distance)
def test_add_qubit(self):
coupling = _coupling.Coupling()
self.assertEqual("", str(coupling))
coupling.add_qubit(('q', 0))
self.assertEqual("qubits: q[0] @ 1", str(coupling))
def test_add_qubit_not_tuple(self):
coupling = _coupling.Coupling()
self.assertRaises(_coupling.CouplingError, coupling.add_qubit, 'q0')
def test_add_qubit_tuple_incorrect_form(self):
coupling = _coupling.Coupling()
self.assertRaises(_coupling.CouplingError, coupling.add_qubit,
('q', '0'))
def test_add_edge(self):
coupling = _coupling.Coupling()
self.assertEqual("", str(coupling))
coupling.add_edge(("q", 0), ('q', 1))
expected = ("qubits: q[0] @ 1, q[1] @ 2\n"
"edges: q[0]-q[1]")
self.assertEqual(expected, str(coupling))
def test_distance_error(self):
"""Test distance method validation."""
graph = _coupling.Coupling({0: [1, 2], 1: [2]})
self.assertRaises(_coupling.CouplingError, graph.distance, ('q0', 0), ('q1', 1))
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 2,951 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 27 | remove_delete_markers 68

hexsha: d991aedad470b351e70cf5b10b085c74cc95e474 | size: 462 | ext: py | lang: Python
max_stars: env/Lib/site-packages/values/__init__.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | ["MIT"] | count: 4 | 2022-02-06T00:54:58.000Z → 2022-02-25T12:44:43.000Z
max_issues: env/Lib/site-packages/values/__init__.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | ["MIT"] | count: 3 | 2021-03-23T04:58:47.000Z → 2021-04-02T02:40:54.000Z
max_forks: env/Lib/site-packages/values/__init__.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | ["MIT"] | count: 1 | 2022-02-08T13:43:20.000Z → 2022-02-08T13:43:20.000Z
content:
__all__ = ['get']
def get(input):
"""return a list with input values or [] if input is None"""
if input is None:
return []
if not _iterable(input) or _string(input):
return [input]
return list(input)
avg_line_length: 18.48 | max_line_length: 64 | alphanum_fraction: 0.645022
original_content:
__all__ = ['get']
import collections
def _iterable(obj):
return isinstance(obj, collections.Iterable)
def _string(value):
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
def get(input):
"""return a list with input values or [] if input is None"""
if input is None:
return []
if not _iterable(input) or _string(input):
return [input]
return list(input)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 159 | remove_class_no_docstring 0 | remove_unused_imports -3 | remove_delete_markers 69

hexsha: d0e19b396bd5c3861e79601ace321dbbd96d9384 | size: 165 | ext: py | lang: Python
max_stars: vnpy/app/strategy_reviewer/ui/__init__.py | xyh888/vnpy | 7b51716928ab9574f171a2eda190b37b4f393bb1 | ["MIT"] | count: 5 | 2019-05-24T05:19:55.000Z → 2020-07-29T13:21:49.000Z
max_issues: vnpy/app/strategy_reviewer/ui/__init__.py | xyh888/vnpy | 7b51716928ab9574f171a2eda190b37b4f393bb1 | ["MIT"] | count: null | null → null
max_forks: vnpy/app/strategy_reviewer/ui/__init__.py | xyh888/vnpy | 7b51716928ab9574f171a2eda190b37b4f393bb1 | ["MIT"] | count: 2 | 2019-07-01T02:14:04.000Z → 2020-07-29T13:21:53.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/20 0020 16:49
# @Author : Hadrianl
# @File : __init__.py
avg_line_length: 23.571429 | max_line_length: 36 | alphanum_fraction: 0.630303
original_content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/20 0020 16:49
# @Author : Hadrianl
# @File : __init__.py
from .widget import StrategyReviewer
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 15 | remove_delete_markers 23

hexsha: 4a04e22adafbd1373a9d9fc82325fd3d15005b8b | size: 647 | ext: py | lang: Python
max_stars: Lesson 13.gf/xml_Leader2.py | gfoo003/programming-together | 225e0a2255dd8da1f1ef32d2a88deea27c050f10 | ["MIT"] | count: null | null → null
max_issues: Lesson 13.gf/xml_Leader2.py | gfoo003/programming-together | 225e0a2255dd8da1f1ef32d2a88deea27c050f10 | ["MIT"] | count: null | null → null
max_forks: Lesson 13.gf/xml_Leader2.py | gfoo003/programming-together | 225e0a2255dd8da1f1ef32d2a88deea27c050f10 | ["MIT"] | count: null | null → null
content:
import xml.etree.ElementTree as ET
xml_string = '''
<stuff>
<users>
<user x = "2">
<id>001</id>
<name>Chuck</name>
</user>
<user x = "7">
<id>007</id>
<name>Brent</name>
</user>
</users>
</stuff>
'''
root_stuff = ET.fromstring(xml_string)
#don't usually refer to root element
user_elements = root_stuff.findall('users/user')
print ('user count:', len(user_elements))
for user in user_elements:
print('name:', user.find('name').text)
print('id:', user.find('id').text)
print('attribute(x):', user.get('x'))
#to identify attribute use 'get's
avg_line_length: 23.107143 | max_line_length: 48 | alphanum_fraction: 0.565688
original_content:
import xml.etree.ElementTree as ET
xml_string = '''
<stuff>
<users>
<user x = "2">
<id>001</id>
<name>Chuck</name>
</user>
<user x = "7">
<id>007</id>
<name>Brent</name>
</user>
</users>
</stuff>
'''
root_stuff = ET.fromstring(xml_string)
#don't usually refer to root element
user_elements = root_stuff.findall('users/user')
print ('user count:', len(user_elements))
for user in user_elements:
print('name:', user.find('name').text)
print('id:', user.find('id').text)
print('attribute(x):', user.get('x'))
#to identify attribute use 'get's
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0

hexsha: bebc974c59298f013c68b5d5e434ba4b2d82a0a8 | size: 213 | ext: py | lang: Python
max_stars: 第4章/program/Chapter_4_dummy.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | ["MIT"] | count: 274 | 2018-10-01T11:07:25.000Z → 2022-03-17T13:48:45.000Z
max_issues: 第4章/program/Chapter_4_dummy.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | ["MIT"] | count: 6 | 2019-02-28T14:18:21.000Z → 2022-03-02T14:57:39.000Z
max_forks: 第4章/program/Chapter_4_dummy.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | ["MIT"] | count: 110 | 2018-10-16T06:08:37.000Z → 2022-03-16T08:19:29.000Z
content:
from multiprocessing.dummy import Pool
pool = Pool(3)
origin_num = [x for x in range(10)]
result = pool.map(calc_power2, origin_num)
print(f'1-10{result}')
avg_line_length: 16.384615 | max_line_length: 42 | alphanum_fraction: 0.71831
original_content:
from multiprocessing.dummy import Pool
def calc_power2(num):
return num * num
pool = Pool(3)
origin_num = [x for x in range(10)]
result = pool.map(calc_power2, origin_num)
print(f'计算1-10的平方分别为:{result}')
filtered: remove_non_ascii 27 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 21 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 23

hexsha: cee8341ee37a27bddc6bb669594ab3c522880752 | size: 11,688 | ext: py | lang: Python
max_stars: pystiche_papers/li_wand_2016/_loss.py | pystiche/papers | 0d8179dc51f6eda0b27fa525dc0b86b866bc88e1 | ["BSD-3-Clause"] | count: 1 | 2021-09-30T09:30:07.000Z → 2021-09-30T09:30:07.000Z
max_issues: pystiche_papers/li_wand_2016/_loss.py | pystiche/papers | 0d8179dc51f6eda0b27fa525dc0b86b866bc88e1 | ["BSD-3-Clause"] | count: 20 | 2021-10-10T13:37:25.000Z → 2022-03-31T07:31:45.000Z
max_forks: pystiche_papers/li_wand_2016/_loss.py | pystiche/papers | 0d8179dc51f6eda0b27fa525dc0b86b866bc88e1 | ["BSD-3-Clause"] | count: null | null → null
content:
from typing import Optional
from pystiche import enc, loss
from pystiche_papers.utils import HyperParameters
from ._utils import (hyper_parameters as _hyper_parameters, multi_layer_encoder as _multi_layer_encoder)
__all__ = [
"FeatureReconstructionLoss",
"content_loss",
"MRFLoss",
"style_loss",
"TotalVariationLoss",
"regularization",
"perceptual_loss",
]
def content_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> FeatureReconstructionLoss:
r"""Content loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
:class:`pystiche_papers.li_wand_2016.FeatureReconstructionLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
return FeatureReconstructionLoss(
multi_layer_encoder.extract_encoder(hyper_parameters.content_loss.layer),
impl_params=impl_params,
score_weight=hyper_parameters.content_loss.score_weight,
)
def style_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.MultiLayerEncodingLoss:
r"""Style loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.MRFLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
return loss.MultiLayerEncodingLoss(
multi_layer_encoder,
hyper_parameters.style_loss.layers,
encoding_loss_fn,
layer_weights=hyper_parameters.style_loss.layer_weights,
score_weight=hyper_parameters.style_loss.score_weight,
)
def regularization(
impl_params: bool = True,
hyper_parameters: Optional[HyperParameters] = None,
) -> TotalVariationLoss:
r"""Regularization from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.TotalVariationLoss`
"""
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return TotalVariationLoss(
impl_params=impl_params,
score_weight=hyper_parameters.regularization.score_weight,
)
def perceptual_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.PerceptualLoss:
r"""Perceptual loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :func:`pystiche_papers.li_wand_2016.content_loss`
- :func:`pystiche_papers.li_wand_2016.style_loss`
- :func:`pystiche_papers.li_wand_2016.regularization`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return loss.PerceptualLoss(
content_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
style_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
regularization(impl_params=impl_params, hyper_parameters=hyper_parameters),
)
| 37.461538
| 110
| 0.693703
|
from typing import Any, Optional, Tuple, Union
import torch
from torch.nn.functional import mse_loss
import pystiche
import pystiche.loss.functional as F
from pystiche import enc, loss
from pystiche_papers.utils import HyperParameters
from ._utils import (
extract_normalized_patches2d,
hyper_parameters as _hyper_parameters,
multi_layer_encoder as _multi_layer_encoder,
target_transforms as _target_transforms,
)
__all__ = [
"FeatureReconstructionLoss",
"content_loss",
"MRFLoss",
"style_loss",
"TotalVariationLoss",
"regularization",
"perceptual_loss",
]
class FeatureReconstructionLoss(loss.FeatureReconstructionLoss):
r"""Feature reconstruction loss from :cite:`LW2016`.
Args:
encoder: Encoder used to encode the input.
impl_params: If ``False``, calculate the score with the squared error (SE)
instead of the mean squared error (MSE).
**feature_reconstruction_loss_kwargs: Additional parameters of a
:class:`pystiche.loss.FeatureReconstructionLoss`.
.. seealso::
:class:`pystiche.loss.FeatureReconstructionLoss`
"""
def __init__(
self,
encoder: enc.Encoder,
impl_params: bool = True,
**feature_reconstruction_loss_kwargs: Any,
):
super().__init__(encoder, **feature_reconstruction_loss_kwargs)
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/content.lua#L15
# nn.MSECriterion() was used as criterion to calculate the content loss, which
# by default uses reduction="mean"
self.loss_reduction = "mean" if impl_params else "sum"
def calculate_score(
self,
input_repr: torch.Tensor,
target_repr: torch.Tensor,
ctx: Optional[torch.Tensor],
) -> torch.Tensor:
return mse_loss(input_repr, target_repr, reduction=self.loss_reduction)
def content_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> FeatureReconstructionLoss:
r"""Content loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
:class:`pystiche_papers.li_wand_2016.FeatureReconstructionLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
return FeatureReconstructionLoss(
multi_layer_encoder.extract_encoder(hyper_parameters.content_loss.layer),
impl_params=impl_params,
score_weight=hyper_parameters.content_loss.score_weight,
)
class MRFLoss(loss.MRFLoss):
r"""MRF loss from :cite:`LW2016`.
Args:
encoder: Encoder used to encode the input.
patch_size: Spatial size of the neural patches.
impl_params: If ``True``, normalize the gradient of the neural patches. If
``False``, use a score correction factor of 1/2.
**mrf_loss_kwargs: Additional parameters of a :class:`pystiche.loss.MRFLoss`.
In contrast to :class:`pystiche.loss.MRFLoss`, the score is calculated with the
squared error (SE) instead of the mean squared error (MSE).
.. seealso::
- :class:`pystiche.loss.MRFLoss`
- :func:`pystiche_papers.li_wand_2016.extract_normalized_patches2d`
"""
def __init__(
self,
encoder: enc.Encoder,
patch_size: Union[int, Tuple[int, int]],
impl_params: bool = True,
**mrf_loss_kwargs: Any,
):
super().__init__(encoder, patch_size, **mrf_loss_kwargs)
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L221
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L224
# They use normalized patches instead of the unnormalized patches described in
# the paper.
self.normalize_patches_grad = impl_params
self.loss_reduction = "sum"
# The score correction factor is not visible in the reference implementation
# of the original authors, since the calculation is performed with respect to
# the gradient and not the score. Roughly speaking, since the calculation
# comprises a *squared* distance, we need a factor of 1/2 in the forward pass.
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L220
self.score_correction_factor = 1.0 / 2.0 if impl_params else 1.0
def enc_to_repr(self, enc: torch.Tensor, is_guided: bool) -> torch.Tensor:
if self.normalize_patches_grad:
repr = extract_normalized_patches2d(enc, self.patch_size, self.stride)
else:
repr = pystiche.extract_patches2d(enc, self.patch_size, self.stride)
if not is_guided:
return repr
return self._guide_repr(repr)
def calculate_score(
self,
input_repr: torch.Tensor,
target_repr: torch.Tensor,
ctx: Optional[torch.Tensor],
) -> torch.Tensor:
score = F.mrf_loss(
input_repr, target_repr, reduction=self.loss_reduction, batched_input=True
)
return score * self.score_correction_factor
def style_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.MultiLayerEncodingLoss:
r"""Style loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.MRFLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
def encoding_loss_fn(encoder: enc.Encoder, layer_weight: float) -> MRFLoss:
return MRFLoss(
encoder,
hyper_parameters.style_loss.patch_size, # type: ignore[union-attr]
impl_params=impl_params,
stride=hyper_parameters.style_loss.stride, # type: ignore[union-attr]
target_transforms=_target_transforms(
impl_params=impl_params, hyper_parameters=hyper_parameters
),
score_weight=layer_weight,
)
return loss.MultiLayerEncodingLoss(
multi_layer_encoder,
hyper_parameters.style_loss.layers,
encoding_loss_fn,
layer_weights=hyper_parameters.style_loss.layer_weights,
score_weight=hyper_parameters.style_loss.score_weight,
)
class TotalVariationLoss(loss.TotalVariationLoss):
r"""Total variation loss from :cite:`LW2016`.
Args:
impl_params: If ``False``, use a score correction factor of 1/2.
**total_variation_loss_kwargs: Additional parameters of a
:class:`pystiche.loss.TotalVariationLoss`.
In contrast to :class:`pystiche.loss.TotalVariationLoss`, the the score is
calculated with the squared error (SE) instead of the mean squared error (MSE).
.. seealso::
- :class:`pystiche.loss.TotalVariationLoss`
"""
def __init__(self, impl_params: bool = True, **total_variation_loss_kwargs: Any):
super().__init__(**total_variation_loss_kwargs)
self.loss_reduction = "sum"
# The score correction factor is not visible in the reference implementation
# of the original authors, since the calculation is performed with respect to
# the gradient and not the score. Roughly speaking, since the calculation
# comprises a *squared* distance, we need a factor of 1/2 in the forward pass.
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/tv.lua#L20-L30
self.score_correction_factor = 1.0 / 2.0 if impl_params else 1.0
def calculate_score(self, input_repr: torch.Tensor) -> torch.Tensor:
score = F.total_variation_loss(
input_repr, exponent=self.exponent, reduction=self.loss_reduction
)
return score * self.score_correction_factor
def regularization(
impl_params: bool = True,
hyper_parameters: Optional[HyperParameters] = None,
) -> TotalVariationLoss:
r"""Regularization from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.TotalVariationLoss`
"""
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return TotalVariationLoss(
impl_params=impl_params,
score_weight=hyper_parameters.regularization.score_weight,
)
def perceptual_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.PerceptualLoss:
r"""Perceptual loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :func:`pystiche_papers.li_wand_2016.content_loss`
- :func:`pystiche_papers.li_wand_2016.style_loss`
- :func:`pystiche_papers.li_wand_2016.regularization`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return loss.PerceptualLoss(
content_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
style_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
regularization(impl_params=impl_params, hyper_parameters=hyper_parameters),
)
| 0
| 0
| 0
| 5,385
| 0
| 488
| 0
| 128
| 186
|
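A very small usage sketch for the loss factories above. It assumes pystiche_papers is installed and that the li_wand_2016 package re-exports the functions defined in _loss.py (otherwise they can be imported from ._loss directly); only building the criterion is shown, attaching images and running the optimisation are out of scope here.
# Assumption: the package __init__ re-exports the factories defined in _loss.py above.
from pystiche_papers.li_wand_2016 import perceptual_loss

criterion = perceptual_loss(impl_params=True)
print(criterion)  # inspect the content loss, MRF style losses and TV regularizer before use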
1b3186c99a60818dc9d24b438538877520aa1347
| 2,640
|
py
|
Python
|
tests/conftest.py
|
Z2PackDev/bands_inspect
|
76fdb0130d9ff64c738365a1911bc61f035927f2
|
[
"Apache-2.0"
] | 1
|
2017-12-19T07:21:56.000Z
|
2017-12-19T07:21:56.000Z
|
tests/conftest.py
|
Z2PackDev/bands-inspect
|
76fdb0130d9ff64c738365a1911bc61f035927f2
|
[
"Apache-2.0"
] | 3
|
2018-02-27T09:07:46.000Z
|
2018-03-06T12:26:04.000Z
|
tests/conftest.py
|
Z2PackDev/bands_inspect
|
76fdb0130d9ff64c738365a1911bc61f035927f2
|
[
"Apache-2.0"
] | 1
|
2017-12-19T07:21:55.000Z
|
2017-12-19T07:21:55.000Z
|
# -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Configuration file for the pytest tests.
"""
#--------------------------FIXTURES-------------------------------------#
| 30.697674
| 168
| 0.659848
|
# -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Configuration file for the pytest tests.
"""
import os
import json
import pytest
import numpy as np
import bands_inspect as bi
import parameters # pylint: disable=wrong-import-order
#--------------------------FIXTURES-------------------------------------#
@pytest.fixture
def test_name(request):
"""Returns module_name.function_name for a given test"""
return request.module.__name__ + '/' + request._parent_request._pyfuncitem.name # pylint: disable=protected-access
@pytest.fixture
def compare_data(request, test_name, scope="session"): # pylint: disable=unused-argument,redefined-outer-name
"""Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function."""
def inner(compare_fct, data, tag=None):
full_name = test_name + (tag or '')
# get rid of json-specific quirks
# store as string because I cannot add the decoder to the pytest cache
data_str = json.dumps(data)
data = json.loads(data_str)
val = json.loads(request.config.cache.get(full_name, 'null'))
if val is None:
request.config.cache.set(full_name, data_str)
raise ValueError('Reference data does not exist.')
assert compare_fct(val, data)
return inner
@pytest.fixture
def compare_equal(compare_data): # pylint: disable=redefined-outer-name
"""
Returns a function which checks that a given data is equal to the stored reference.
"""
return lambda data, tag=None: compare_data(lambda x, y: x == y, data, tag)
@pytest.fixture
def assert_equal():
"""
Returns a function which checks that two bands-inspect object instances are equal.
"""
def inner(obj1, obj2):
if isinstance(obj1, bi.kpoints.KpointsBase):
np.testing.assert_equal(
obj1.kpoints_explicit, obj2.kpoints_explicit
)
elif isinstance(obj1, bi.eigenvals.EigenvalsData):
np.testing.assert_equal(
obj1.kpoints.kpoints_explicit, obj2.kpoints.kpoints_explicit
)
np.testing.assert_equal(obj1.eigenvals, obj2.eigenvals)
else:
raise ValueError("Unknown type {}".format(type(obj1)))
return inner
@pytest.fixture
def sample():
"""
Returns the absolute path of the sample with a given name.
"""
def inner(name):
return os.path.join(parameters.SAMPLES_DIR, name)
return inner
| 0
| 2,124
| 0
| 0
| 0
| 0
| 0
| -32
| 289
|
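A hypothetical test module relying on the conftest fixtures above might look like the following; the file name, the compared payload and the sample file name are invented for illustration, but the fixture names and behaviour match the code shown.
# test_example.py -- hypothetical test module using the fixtures defined in conftest.py
def test_cached_comparison(compare_equal):
    # On the first run the value is stored in pytest's cache and
    # 'Reference data does not exist.' is raised; later runs compare against it.
    compare_equal({'eigenvalues': [0.0, 1.5, 2.25]}, tag='example')

def test_sample_path(sample):
    # 'some_file.hdf5' is a placeholder name; sample() just joins it onto SAMPLES_DIR.
    path = sample('some_file.hdf5')
    assert path.endswith('some_file.hdf5')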
a365ad738e2f0d42460bbe15195bfcc181ad7c09
| 3,518
|
py
|
Python
|
src/tensorrt/tools/caffe_engine/call_engine_to_infer_all.py
|
aimuch/AIEnvConfig
|
4ccd54e9c601e8c91efebcec1a50115d75d0cf96
|
[
"MIT"
] | 250
|
2019-06-14T16:12:20.000Z
|
2022-03-27T09:56:26.000Z
|
src/tensorrt/tools/caffe_engine/call_engine_to_infer_all.py
|
aimuch/AIEnvConfig
|
4ccd54e9c601e8c91efebcec1a50115d75d0cf96
|
[
"MIT"
] | 6
|
2018-08-10T07:15:39.000Z
|
2018-10-23T01:51:17.000Z
|
src/tensorrt/tools/caffe_engine/call_engine_to_infer_all.py
|
aimuch/AIEnvConfig
|
4ccd54e9c601e8c91efebcec1a50115d75d0cf96
|
[
"MIT"
] | 41
|
2019-08-16T13:42:13.000Z
|
2022-02-23T03:38:09.000Z
|
# import tensorflow as tf
import tensorrt as trt
import pycuda.driver as cuda
# import uff
import numpy as np
TEST_PATH = "/media/andy/Data/DevWorkSpace/Projects/imageClassifier/data/test/"
LABEL = 0
ENGINE_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/engine/px2_classifier.engine"
NET_INPUT_SHAPE = (256, 256)
NET_OUTPUT_SHAPE = 5
class_labels = ['error', 'half', 'invlb', 'invls', 'valid']
# Load Image
imgTestData = test_Loader(TEST_PATH, NET_INPUT_SHAPE)
# Load Engine file
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)
engine = trt.utils.load_engine(G_LOGGER, ENGINE_PATH)
context = engine.create_execution_context()
runtime = trt.infer.create_infer_runtime(G_LOGGER)
# output = np.empty(1, dtype = np.float32)
# # Alocate device memory
# d_input = cuda.mem_alloc(1 * imgTestData[0][0][0].nbytes)
# d_output = cuda.mem_alloc(NET_OUTPUT_SHAPE * output.nbytes)
# bindings = [int(d_input), int(d_output)]
# stream = cuda.Stream()
predicts = []
pair = imgTestData[0]
for img, label in pair:
output = np.empty(NET_OUTPUT_SHAPE, dtype = np.float32)
# Alocate device memory
d_input = cuda.mem_alloc(1 * img.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, img, stream)
# Execute model
context.enqueue(1, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
# Syncronize threads
stream.synchronize()
softmax = np.exp(output) / np.sum(np.exp(output))
predict = np.argmax(softmax)
predicts.append(predict)
print("True = ",label, ", predict = ", predict, ", softmax = ", softmax)
grandTrue = np.array(imgTestData[1][1])
predicts = np.array(predicts)
error = predicts[predicts!=grandTrue]
print(imgTestData[1][1])
print("-------")
print(predicts)
print("-------")
print(len(error))
print((len(imgTestData[0])-len(error))/len(imgTestData[0]))
| 30.327586
| 105
| 0.677658
|
import os
# import tensorflow as tf
import tensorrt as trt
from tensorrt.parsers import uffparser
import pycuda.driver as cuda
# import uff
import cv2
import numpy as np
from tqdm import tqdm
TEST_PATH = "/media/andy/Data/DevWorkSpace/Projects/imageClassifier/data/test/"
LABEL = 0
ENGINE_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/engine/px2_classifier.engine"
NET_INPUT_SHAPE = (256, 256)
NET_OUTPUT_SHAPE = 5
class_labels = ['error', 'half', 'invlb', 'invls', 'valid']
# Load Image
def load_image(img_path, net_input_shape):
img = cv2.resize(cv2.imread(img_path), net_input_shape)
# BGR -> RGB
#img = img[:,:, (2, 1, 0)]
## Method 1
# imgT = np.transpose(img, (2, 0, 1)) # c,w,h
# imgF = np.asarray(imgT, dtype=np.float32)
# mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
# imgS = np.subtract(imgF,mean)
## Method 2
imgF = np.asarray(img, dtype=np.float32)
mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
imgSS = np.subtract(imgF, mean)
imgS = np.transpose(imgSS, (2, 0, 1)) # CHW
# RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)
return np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous
def test_Loader(TEST_PATH, net_input_shape):
label_list = []
img_list = []
pair = []
folders = os.listdir(TEST_PATH)
for folder in folders:
folder_path = os.path.join(TEST_PATH, folder)
imgs = os.listdir(folder_path)
for img in tqdm(imgs):
img_path = os.path.join(folder_path, img)
img = load_image(img_path, net_input_shape)
label = class_labels.index(folder)
img_list.append(img)
label_list.append(label)
pair.append((img, label))
return pair, (img_list, label_list)
imgTestData = test_Loader(TEST_PATH, NET_INPUT_SHAPE)
# Load Engine file
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)
engine = trt.utils.load_engine(G_LOGGER, ENGINE_PATH)
context = engine.create_execution_context()
runtime = trt.infer.create_infer_runtime(G_LOGGER)
# output = np.empty(1, dtype = np.float32)
# # Alocate device memory
# d_input = cuda.mem_alloc(1 * imgTestData[0][0][0].nbytes)
# d_output = cuda.mem_alloc(NET_OUTPUT_SHAPE * output.nbytes)
# bindings = [int(d_input), int(d_output)]
# stream = cuda.Stream()
predicts = []
pair = imgTestData[0]
for img, label in pair:
output = np.empty(NET_OUTPUT_SHAPE, dtype = np.float32)
# Alocate device memory
d_input = cuda.mem_alloc(1 * img.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, img, stream)
# Execute model
context.enqueue(1, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
# Syncronize threads
stream.synchronize()
softmax = np.exp(output) / np.sum(np.exp(output))
predict = np.argmax(softmax)
predicts.append(predict)
print("True = ",label, ", predict = ", predict, ", softmax = ", softmax)
grandTrue = np.array(imgTestData[1][1])
predicts = np.array(predicts)
error = predicts[predicts!=grandTrue]
print(imgTestData[1][1])
print("-------")
print(predicts)
print("-------")
print(len(error))
print((len(imgTestData[0])-len(error))/len(imgTestData[0]))
| 0
| 0
| 0
| 0
| 0
| 1,347
| 0
| -6
| 133
|
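The post-processing at the end of the script above (softmax, argmax, accuracy) does not depend on TensorRT at all; a small self-contained version of just that part, with made-up logits standing in for the engine output, is sketched below.
import numpy as np

def softmax(logits):
    # subtract the max for numerical stability
    e = np.exp(logits - np.max(logits))
    return e / np.sum(e)

# Dummy 5-class logits standing in for the engine output
outputs = np.array([[2.0, 0.1, 0.3, 0.2, 0.5],
                    [0.1, 3.2, 0.4, 0.1, 0.2]])
labels = np.array([0, 1])

predicts = np.array([np.argmax(softmax(o)) for o in outputs])
errors = predicts[predicts != labels]
accuracy = (len(labels) - len(errors)) / len(labels)
print('accuracy:', accuracy)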
3db8ec872b628c2d5573b83d71f828295df1aa7e
| 2,054
|
py
|
Python
|
machineLearning.py
|
z-Wind/EQOptimum
|
c046daec2c6218277a3fec9fa0c87bea0b30ff2f
|
[
"MIT"
] | null | null | null |
machineLearning.py
|
z-Wind/EQOptimum
|
c046daec2c6218277a3fec9fa0c87bea0b30ff2f
|
[
"MIT"
] | null | null | null |
machineLearning.py
|
z-Wind/EQOptimum
|
c046daec2c6218277a3fec9fa0c87bea0b30ff2f
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
if __name__ == "__main__":
# Create a random dataset
# [fc, bandwidth, gain]
n = 100
filtersNum = 1
X, Y = genXY(n=n, filtersNum=filtersNum)
# Fit regression model
regr = MLPRegressor(hidden_layer_sizes=(10,), max_iter=10000)
regr.fit(X, Y)
print('train loss', regr.loss_)
# Predict
X_test, Y_test = genXY(n=n, filtersNum=filtersNum)
print('test loss', ((Y_test - regr.predict(X_test)) ** 2).mean())
# paras = [(1e4, 2500, 3), (300, 201, 10), (400, 600, 5), (600, 200, 8),
# (2000, 3500, 13), (6000, 4000, 3), (8500, 6000, 2.75),]
paras = [(1e4, 2500, 3),]
f, db = filterModel(paras)
plt.semilogx(f, db, label="target", color='red')
y_pred = regr.predict([db])
f, db = filterModel(y_pred.reshape(filtersNum, 3))
plt.semilogx(f, db, label="NN")
plt.legend()
plt.show()
| 27.026316
| 82
| 0.556962
|
import filters
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz
from sklearn.neural_network import MLPRegressor
def filterModel(x):
# [fc, bandwidth, gain]
w_final = None
db_final = 0
fs = 44100
for fc, BW, gain in x:
b, a = filters.bandpass_peaking(fc=fc, gain=gain, BW=BW)
w, h = freqz(b, a, worN=np.linspace(np.pi*2/fs*20, np.pi*2/fs*20e3, 500))
db = 20 * np.log10(abs(h))
w_final = w
db_final += db
# plt.semilogx(w_final * fs / (2*np.pi), db_final)
return w_final*fs/(2*np.pi), db_final
def genXY(n, filtersNum):
total = n * filtersNum
fc = np.random.uniform(20, 20e3, size=(total,1))
bw = np.random.uniform(100, 10000, size=(total,1))
gain = np.random.uniform(0, 20, size=(total,1))
Y = np.concatenate((fc,bw,gain), axis=1)
Y = Y.reshape(n, filtersNum, 3)
X = []
for paras in Y:
f, db = filterModel(paras)
X.append(db)
X = np.array(X)
Y = Y.reshape(n, filtersNum*3)
return X, Y
if __name__ == "__main__":
# Create a random dataset
# [fc, bandwidth, gain]
n = 100
filtersNum = 1
X, Y = genXY(n=n, filtersNum=filtersNum)
# Fit regression model
regr = MLPRegressor(hidden_layer_sizes=(10,), max_iter=10000)
regr.fit(X, Y)
print('train loss', regr.loss_)
# Predict
X_test, Y_test = genXY(n=n, filtersNum=filtersNum)
print('test loss', ((Y_test - regr.predict(X_test)) ** 2).mean())
# paras = [(1e4, 2500, 3), (300, 201, 10), (400, 600, 5), (600, 200, 8),
# (2000, 3500, 13), (6000, 4000, 3), (8500, 6000, 2.75),]
paras = [(1e4, 2500, 3),]
f, db = filterModel(paras)
plt.semilogx(f, db, label="target", color='red')
y_pred = regr.predict([db])
f, db = filterModel(y_pred.reshape(filtersNum, 3))
plt.semilogx(f, db, label="NN")
plt.legend()
plt.show()
| 0
| 0
| 0
| 0
| 0
| 921
| 0
| -1
| 123
|
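The regression step in the script above can be isolated from the filters module; MLPRegressor usually benefits from feature scaling, and a minimal sketch of that idea on synthetic data (the shapes mimic the 500-point response curves and the [fc, bandwidth, gain] targets, but the values are random) is shown below.
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor

# Synthetic stand-in for the (frequency response -> filter parameters) mapping
rng = np.random.default_rng(0)
X = rng.uniform(-20, 20, size=(100, 500))   # 500-point response curves
Y = rng.uniform(0, 20, size=(100, 3))       # [fc, bandwidth, gain] targets

model = make_pipeline(StandardScaler(),
                      MLPRegressor(hidden_layer_sizes=(10,), max_iter=10000))
model.fit(X, Y)
print('train score:', model.score(X, Y))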
6f7dc504b463999eb2e9b24300c31ee083334da5
| 980
|
py
|
Python
|
src/utils/dist.py
|
shaoeric/torch-atom
|
7688fc38c0d19fe4d13a9773115df911ffe6eaaa
|
[
"MIT"
] | 28
|
2022-03-06T06:04:54.000Z
|
2022-03-27T04:14:33.000Z
|
src/utils/dist.py
|
shaoeric/torch-atom
|
7688fc38c0d19fe4d13a9773115df911ffe6eaaa
|
[
"MIT"
] | null | null | null |
src/utils/dist.py
|
shaoeric/torch-atom
|
7688fc38c0d19fe4d13a9773115df911ffe6eaaa
|
[
"MIT"
] | 3
|
2022-03-11T07:01:58.000Z
|
2022-03-17T05:34:41.000Z
|
import torch.distributed as dist
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
| 22.790698
| 72
| 0.626531
|
import torch.distributed as dist
import torch
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def reduce_value(value, average=True):
world_size = get_world_size()
if world_size < 2:
return value
with torch.no_grad():
dist.all_reduce(value)
if average:
value /= world_size
return value
| 0
| 0
| 0
| 0
| 0
| 507
| 0
| -9
| 114
|
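A sketch of how the helpers above are typically used inside a distributed training step. It assumes torch.distributed has already been initialised elsewhere (e.g. via init_process_group), and the import path is only a guess based on the repository layout; adjust it to wherever the module actually lives.
import torch
# Hypothetical import path (the file is src/utils/dist.py in this repository).
from src.utils.dist import is_main_process, reduce_value, synchronize

def training_step(model, batch, optimizer, loss_fn):
    inputs, targets = batch
    optimizer.zero_grad()
    loss = loss_fn(model(inputs), targets)
    loss.backward()
    optimizer.step()

    # Average the loss across all ranks purely for logging.
    logged = reduce_value(loss.detach(), average=True)
    if is_main_process():
        print('loss:', logged.item())

    # Keep ranks in lock-step before the next iteration (no-op on a single process).
    synchronize()
    return logged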
7d4f4e96803718430d878ca088bcaed92b3079cc
| 3,822
|
py
|
Python
|
base_pool/mysql_pool/mysql_views.py
|
zhanzhangwei/kafka-study
|
6be4167319b855c9560e92932aae628f87a5e680
|
[
"Apache-2.0"
] | null | null | null |
base_pool/mysql_pool/mysql_views.py
|
zhanzhangwei/kafka-study
|
6be4167319b855c9560e92932aae628f87a5e680
|
[
"Apache-2.0"
] | null | null | null |
base_pool/mysql_pool/mysql_views.py
|
zhanzhangwei/kafka-study
|
6be4167319b855c9560e92932aae628f87a5e680
|
[
"Apache-2.0"
] | null | null | null |
import pymysql
mysql_client = MysqlClient()
| 30.576
| 113
| 0.545526
|
import json
import pymysql
import datetime
from dbutils.pooled_db import PooledDB
import pymysql
from conf.common import *
class MysqlClient(object):
__pool = None
def __init__(self):
"""
:param mincached: initial number of idle connections in the pool
:param maxcached: maximum number of idle connections in the pool
:param maxshared: maximum number of shared connections
:param maxconnections: maximum number of connections the pool may create
:param blocking: behaviour when the connection limit is reached; True waits for the count to drop, False raises an error immediately
:param maxusage: maximum number of times a single connection is reused
:param setsession: optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to ...", "set time zone ..."]
:param reset: how connections should be reset when returned to the pool
(False or None to rollback transactions started with begin(),
True to always issue a rollback for safety's sake)
:param host: database IP address
:param port: database port
:param db: database name
:param user: user name
:param passwd: password
:param charset: character encoding
"""
mincached = 10
maxcached = 20
maxshared = 10
maxconnections = 200
blocking = True
maxusage = 100
setsession = None
reset = True
host = MYSQL_HOST
port = MYSQL_PORT
db = DATABASE
user = USER
passwd = PASSWORD
charset = 'utf8mb4'
if not self.__pool:
self.__class__.__pool = PooledDB(pymysql,
mincached, maxcached,
maxshared, maxconnections, blocking,
maxusage, setsession, reset,
host=host, port=port, db=db,
user=user, passwd=passwd,
charset=charset,
cursorclass=pymysql.cursors.DictCursor
)
self._conn = None
self._cursor = None
self.__get_conn()
def __get_conn(self):
self._conn = self.__pool.connection()
self._cursor = self._conn.cursor()
def close(self):
try:
self._cursor.close()
self._conn.close()
except Exception as e:
print(e)
def __execute(self, sql, param=()):
count = self._cursor.execute(sql, param)
print(count)
return count
@staticmethod
def __dict_datetime_obj_to_str(result_dict):
"""把字典里面的datatime对象转成字符串,使json转换不出错"""
if result_dict:
result_replace = {k: v.__str__() for k, v in result_dict.items() if isinstance(v, datetime.datetime)}
result_dict.update(result_replace)
return result_dict
def select_one(self, sql, param=()):
"""查询单个结果"""
count = self.__execute(sql, param)
result = self._cursor.fetchone()
""":type result:dict"""
result = self.__dict_datetime_obj_to_str(result)
return count, result
def select_many(self, sql, param=()):
"""
Query multiple results
:param sql: SQL statement
:param param: SQL parameters
:return: number of results and the result set
"""
count = self.__execute(sql, param)
result = self._cursor.fetchall()
""":type result:list"""
[self.__dict_datetime_obj_to_str(row_dict) for row_dict in result]
return count, result
def execute(self, sql, param=()):
count = self.__execute(sql, param)
return count
def begin(self):
"""开启事务"""
self._conn.autocommit(0)
def end(self, option='commit'):
"""结束事务"""
if option == 'commit':
self._conn.autocommit()
else:
self._conn.rollback()
mysql_client = MysqlClient()
| 492
| 280
| 0
| 3,199
| 0
| 0
| 0
| -2
| 134
|
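A brief, hypothetical usage sketch for the pooled client above. The table and column names are invented, parameters are passed separately so pymysql performs the escaping, and it assumes the connection settings in conf.common point at a reachable database.
# Hypothetical usage of the MysqlClient defined above; 'users' is an invented table.
count, row = mysql_client.select_one(
    "SELECT id, name FROM users WHERE id = %s", (42,))
print(count, row)

count, rows = mysql_client.select_many(
    "SELECT id, name FROM users WHERE created_at > %s LIMIT 10", ("2022-01-01",))
for r in rows:
    print(r["id"], r["name"])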
fbbdf9d38ba25ab279b3c1a4de1e0e092ad03325
| 8,998
|
py
|
Python
|
scripts/jupyter_vdi.py
|
ScottWales/cosima-cookbook
|
0ed83e2165efe5badfca59e2dccf835ab7acecca
|
[
"Apache-2.0"
] | null | null | null |
scripts/jupyter_vdi.py
|
ScottWales/cosima-cookbook
|
0ed83e2165efe5badfca59e2dccf835ab7acecca
|
[
"Apache-2.0"
] | null | null | null |
scripts/jupyter_vdi.py
|
ScottWales/cosima-cookbook
|
0ed83e2165efe5badfca59e2dccf835ab7acecca
|
[
"Apache-2.0"
] | 1
|
2020-01-30T05:36:08.000Z
|
2020-01-30T05:36:08.000Z
|
#!/usr/bin/env python
"""
Script to launch a VDI session (or connect to already running session)
and start a Jupyter server on the VDI
A ssh tunnel from the local machine to the VDI is set up and the local
webbrowser is spawned.
This is a python3 script (uses unicode strings). If you don't have
python3 on your local machine, try installing Miniconda3
The only external module is pexpect which may need to be installed
using conda or pip.
Usage:
- if you use a password, the script will ask for your password when needed
- if you have already set up SSH public key with Strudel, try running
$ ssh-add ~/.ssh/MassiveLauncherKey
to add your public key to the ssh key agent.
Author: James Munroe, 2017
"""
from __future__ import print_function
import getpass
import pexpect
import os
import configparser
# Requires future module https://pypi.org/project/future/
from builtins import input
import logging
logging.basicConfig(format='[%(asctime)s jupyter_vdi.py] %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
try:
except ImportError:
is_mac = False
else:
is_mac = True
DEFAULTS = {
'user' : getpass.getuser(),
'JupyterPort' : '8889',
'BokehPort' : '8787',
'execHost' : 'vdi.nci.org.au'
}
verbose = 0
config_path = os.path.expanduser('~/cosima_cookbook.conf')
parser = configparser.ConfigParser(defaults=DEFAULTS)
if os.path.exists(config_path):
logging.info('Using config file: {}'.format(config_path))
parser.read(config_path)
else:
logging.warn('No config file found. Creating default {} file.'.format(config_path))
logging.warn('*** Please edit this file as needed. ***')
while DEFAULTS['user']==getpass.getuser() or DEFAULTS['user']=="":
DEFAULTS['user']=input('What is your NCI username? ')
parser = configparser.ConfigParser(defaults=DEFAULTS)
with open(config_path, 'w') as f:
parser.write(f)
params = parser.defaults()
def ssh(cmd, params, login_timeout=10):
"""
Run a remote command via SSH
"""
clean_params(params)
cmd = ("ssh -x -l {user} {exechost} " + cmd).format(**params)
if verbose > 0: logging.info(cmd)
s = pexpect.spawn(cmd)
# SSH pexpect logic taken from pxshh:
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# First phase
if i == 0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
s.sendline("yes")
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
if i == 1: # password or passphrase
if 'password' not in params:
params['password'] = getpass.getpass('password: ')
s.sendline(params['password'])
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# TODO: check if ssh connection is successful
return s
def session(func, *args, **kwargs):
"""wrapper for sending session-ctl commands"""
cmd = '/opt/vdi/bin/session-ctl --configver=20151620513 ' + func
s = ssh(cmd, *args, **kwargs)
s.close()
return s
tunnel_started = False
tunnel = None
if __name__ == "__main__":
main_argv()
| 33.574627
| 146
| 0.629362
|
#!/usr/bin/env python
"""
Script to launch a VDI session (or connect to already running session)
and start a Jupyter server on the VDI
A ssh tunnel from the local machine to the VDI is set up and the local
webbrowser is spawned.
This is a python3 script (uses unicode strings). If you don't have
python3 on your local machine, try installing Miniconda3
The only external module is pexpect which may need to be installed
using conda or pip.
Usage:
- if you use a password, the script will ask for your password when needed
- if you have already set up SSH public key with Strudel, try running
$ ssh-add ~/.ssh/MassiveLauncherKey
to add your public key to the ssh key agent.
Author: James Munroe, 2017
"""
from __future__ import print_function
import re
import sys
import time
import getpass
import pexpect
import os
import configparser
# Requires future module https://pypi.org/project/future/
from builtins import input
import argparse
import logging
logging.basicConfig(format='[%(asctime)s jupyter_vdi.py] %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
try:
import appscript
except ImportError:
import webbrowser
is_mac = False
else:
is_mac = True
DEFAULTS = {
'user' : getpass.getuser(),
'JupyterPort' : '8889',
'BokehPort' : '8787',
'execHost' : 'vdi.nci.org.au'
}
verbose = 0
config_path = os.path.expanduser('~/cosima_cookbook.conf')
parser = configparser.ConfigParser(defaults=DEFAULTS)
if os.path.exists(config_path):
logging.info('Using config file: {}'.format(config_path))
parser.read(config_path)
else:
logging.warn('No config file found. Creating default {} file.'.format(config_path))
logging.warn('*** Please edit this file as needed. ***')
while DEFAULTS['user']==getpass.getuser() or DEFAULTS['user']=="":
DEFAULTS['user']=input('What is your NCI username? ')
parser = configparser.ConfigParser(defaults=DEFAULTS)
with open(config_path, 'w') as f:
parser.write(f)
params = parser.defaults()
def parse_args(args):
parser = argparse.ArgumentParser(description="Log into the VDI, start a jupyter notebook session and ssh tunnel to local machine")
parser.add_argument("-v","--verbose", help="Increase verbosity", action='count', default=0)
return parser.parse_args(args)
def clean_params(params):
for key, value in params.items():
try:
params[key] = value.decode()
except AttributeError:
pass
def ssh(cmd, params, login_timeout=10):
"""
Run a remote command via SSH
"""
clean_params(params)
cmd = ("ssh -x -l {user} {exechost} " + cmd).format(**params)
if verbose > 0: logging.info(cmd)
s = pexpect.spawn(cmd)
# SSH pexpect logic taken from pxshh:
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# First phase
if i == 0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
s.sendline("yes")
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
if i == 1: # password or passphrase
if 'password' not in params:
params['password'] = getpass.getpass('password: ')
s.sendline(params['password'])
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# TODO: check if ssh connection is successful
return s
def session(func, *args, **kwargs):
"""wrapper for sending session-ctl commands"""
cmd = '/opt/vdi/bin/session-ctl --configver=20151620513 ' + func
s = ssh(cmd, *args, **kwargs)
s.close()
return s
def open_jupyter_url(params):
# Open browser locally
status = ''
url = 'http://localhost:{jupyterport}/?token={token}'.format(**params)
if is_mac:
status = "Using appscript to open {}".format(url)
safari = appscript.app("Safari")
safari.make(new=appscript.k.document, with_properties={appscript.k.URL: url})
else:
status = "Opening {}".format(url)
webbrowser.open(url)
return status
tunnel_started = False
tunnel = None
def start_tunnel(params):
# Create ssh tunnel for local access to jupyter notebook
cmd = ' '.join(['-N -f -L {jupyterport}:localhost:{jupyterport}',
'-L {bokehport}:localhost:{bokehport}'])
# This print statement is needed as there are /r/n line endings from
# the jupyter notebook output that are difficult to suppress
logging.info("Starting ssh tunnel...")
tunnel = ssh(cmd, params, login_timeout=2)
tunnel.expect (pexpect.EOF)
# Open web browser and log result
logging.info(open_jupyter_url(params))
def main(args):
# global verbose means it doesn't need to be passed to every routine
global verbose
verbose = args.verbose
logging.info("Checking SSH keys to VDI are configured...")
r = session('hello --partition main', params)
if r.exitstatus != 0:
# suggest setting up SSH keys
logging.error("Error with ssh keys/password and VDI.")
logging.error(" Incorrect user name in ~/cosima_cookbook.conf file?")
logging.error(" Edit ~/cosima_cookbook.conf before continuing.")
sys.exit(1)
logging.info("SSH keys configured OK")
logging.info("Determine if VDI session is already running...")
r = session('list-avail --partition main', params)
m = re.search('#~#id=(?P<jobid>(?P<jobidNumber>.*?))#~#state=(?P<state>.*?)(?:#~#time_rem=(?P<remainingWalltime>.*?))?#~#', r.before.decode())
if m is not None:
params.update(m.groupdict())
w = int(params['remainingWalltime'])
remainingWalltime = '{:02}:{:02}:{:02}'.format(
w // 3600, w % 3600 // 60, w % 60)
logging.info('Time remaining: %s', remainingWalltime)
# TODO: should give user option of starting a new session if the remaining walltime is short
else:
logging.info('No VDI session found')
logging.info("Launching a new VDI session...")
r = session('launch --partition main', params)
m = re.search('#~#id=(?P<jobid>(?P<jobidNumber>.*?))#~#',
r.before.decode())
if m is None:
logging.info('Unable to launch new VDI session:\n'+r.before.decode())
params.update(m.groupdict())
time.sleep(2) # TODO: instead of waiting, should check for confirmation
# use has-started
logging.info("Determine jobid for VDI session...{jobid}".format(**params))
logging.info("Get exechost for VDI session...")
r = session('get-host --jobid {jobid}', params)
m = re.search('#~#host=(?P<exechost>.*?)#~#', r.before.decode())
params.update(m.groupdict())
logging.info('exechost: {exechost}'.format(**params))
logging.info("Running Jupyter on VDI...")
setupconda = params.get('setupconda',
"""module use /g/data3/hh5/public/modules
&& module load conda/analysis3
""".replace('\n', ' '))
jupyterapp = params.get('jupyterapp', "notebook")
run_jupyter = "jupyter %s --no-browser --port {jupyterport}" % jupyterapp
run_jupyter = setupconda + ' && ' + run_jupyter
cmd = ' '.join(['-t', """'bash -l -c "%s"'""" % run_jupyter])
logging.info("Waiting for Jupyter to start...")
# Launch jupyter on VDI
s = ssh(cmd, params, login_timeout=2)
ret = s.expect('http://\S*:(?P<jupyterport>\d+)/\?token=(?P<token>[a-zA-Z0-9]+)')
if s.match:
params.update(s.match.groupdict())
start_tunnel(params)
else:
logging.info("Could not find url information in jupyter output")
sys.exit(1)
# Grab all the output up to the incorrect URL -- uses the token twice, which is unhelpful
ret = s.expect('http://.*')
logging.info("Use Control-C to stop the Notebook server and shut down all kernels (twice to skip confirmation)\n\n")
# give control over to user
s.interact()
logging.info('end of script')
# optional: terminate to close the vdi session?
def main_argv():
args = parse_args(sys.argv[1:])
main(args)
if __name__ == "__main__":
main_argv()
| 0
| 0
| 0
| 0
| 0
| 4,784
| 0
| -48
| 279
|
1fa6873ff966dcc647833979508b75f9d44bd7bd
| 2,703
|
py
|
Python
|
utils/data.py
|
YOUSIKI/PyTorch-FBS
|
5e94c3183f064ef5ed7f4b7d82b076056200b368
|
[
"Apache-2.0"
] | 10
|
2020-09-14T02:40:37.000Z
|
2022-01-13T11:13:36.000Z
|
utils/data.py
|
YOUSIKI/PyTorch-FBS
|
5e94c3183f064ef5ed7f4b7d82b076056200b368
|
[
"Apache-2.0"
] | 2
|
2020-11-28T05:48:45.000Z
|
2022-03-11T13:44:50.000Z
|
utils/data.py
|
YOUSIKI/PyTorch-FBS
|
5e94c3183f064ef5ed7f4b7d82b076056200b368
|
[
"Apache-2.0"
] | 2
|
2020-11-28T02:27:08.000Z
|
2021-11-24T03:10:10.000Z
|
# -*- coding=utf-8 -*-
__all__ = [
'tiny_imagenet',
'imagewoof2',
'imagenette2'
]
_default_batch_size = 32
_default_num_workers = 4
| 34.653846
| 69
| 0.532741
|
# -*- coding=utf-8 -*-
__all__ = [
'tiny_imagenet',
'imagewoof2',
'imagenette2'
]
import os
import torch
import torchvision
_default_batch_size = 32
_default_num_workers = 4
def _transform(train=True):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if train:
return torchvision.transforms.Compose([
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
else:
return torchvision.transforms.Compose([
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
def tiny_imagenet(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'tiny-imagenet-200', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagewoof2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagewoof2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagenette2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagenette2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
| 0
| 0
| 0
| 0
| 0
| 2,418
| 0
| -24
| 159
|
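A short sketch of consuming one of the loaders above. It assumes the helpers are importable (they live in utils/data.py in this repository) and that datasets/imagenette2/{train,val}/<class>/<image> exists on disk in the ImageFolder layout the code expects.
import torch
# Hypothetical import path; adjust to wherever the loaders above are defined.
from utils.data import imagenette2

train_loader = imagenette2('train', batch_size=16, num_workers=2)

images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([16, 3, 224, 224]) after RandomResizedCrop(224)
print(labels.shape)   # torch.Size([16])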
838d22d0dea3f0cea788de6ba72e416ad4ef2add
| 1,917
|
py
|
Python
|
tests/e2e/runner.py
|
wilzbach/storyscript-sls
|
d71d74a53852ebae54bdaab341678b04f2775411
|
[
"Apache-2.0"
] | null | null | null |
tests/e2e/runner.py
|
wilzbach/storyscript-sls
|
d71d74a53852ebae54bdaab341678b04f2775411
|
[
"Apache-2.0"
] | null | null | null |
tests/e2e/runner.py
|
wilzbach/storyscript-sls
|
d71d74a53852ebae54bdaab341678b04f2775411
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env pytest
from tests.e2e.utils.fixtures import find_test_files
test_files = find_test_files(relative=True)
# compile a story and compare its completion with the expected tree
# load a story from the file system and load its expected result file (.json)
| 28.191176
| 77
| 0.720396
|
#!/usr/bin/env pytest
import io
import json
from os import path
from pytest import fixture, mark
from sls import App
import storyscript.hub.Hub as StoryHub
from storyhub.sdk.AutoUpdateThread import AutoUpdateThread
from tests.e2e.utils.features import parse_options
from tests.e2e.utils.fixtures import find_test_files, hub, test_dir
test_files = find_test_files(relative=True)
@fixture
def patched_storyhub(mocker, scope="module"):
mocker.patch.object(StoryHub, "StoryscriptHub", return_value=hub)
mocker.patch.object(AutoUpdateThread, "dispatch_update")
# compile a story and compare its completion with the expected tree
def run_test_completion(uri, source, expected, patch, options):
action = options.pop("action", "complete")
if action == "complete":
result = App(hub=hub).complete(uri=uri, text=source, **options)
else:
assert action == "click"
result = App(hub=hub).click(uri=uri, text=source, **options)
assert result == expected
# load a story from the file system and load its expected result file (.json)
def run_test(story_path, patch):
story_string = None
with io.open(story_path, "r") as f:
story_string = f.read()
expected_path = path.splitext(story_path)[0]
assert path.isfile(
expected_path + ".json"
), f"Path: `{expected_path}.json` does not exist."
expected_completion = None
with io.open(expected_path + ".json", "r") as f:
expected_completion = f.read()
# deserialize the expected completion
expected = json.loads(expected_completion)
options = parse_options(story_string)
return run_test_completion(
story_path, story_string, expected, patch, options
)
@mark.usefixtures("patched_storyhub")
@mark.parametrize("test_file", test_files)
def test_story(test_file, patch):
test_file = path.join(test_dir, test_file)
run_test(test_file, patch)
| 0
| 335
| 0
| 0
| 0
| 955
| 0
| 83
| 270
|
a028f9eab21f99b975a3ac640714e3b636189bcc
| 342
|
py
|
Python
|
Misc/Become_a_Python_Developer/2_Programming Fundamentals in the Real World/Ex_Files_Programming_Realworld/Exercise Files/Ch05/05_03/start_05_03_coordinates.py
|
specter01wj/LAB-Lynda
|
1915ada66f4498cdf15a0e2a068c938e325e9ba3
|
[
"MIT"
] | null | null | null |
Misc/Become_a_Python_Developer/2_Programming Fundamentals in the Real World/Ex_Files_Programming_Realworld/Exercise Files/Ch05/05_03/start_05_03_coordinates.py
|
specter01wj/LAB-Lynda
|
1915ada66f4498cdf15a0e2a068c938e325e9ba3
|
[
"MIT"
] | 8
|
2020-07-08T06:20:03.000Z
|
2022-03-02T10:05:06.000Z
|
Misc/Become_a_Python_Developer/2_Programming Fundamentals in the Real World/Ex_Files_Programming_Realworld/Exercise Files/Ch05/05_03/start_05_03_coordinates.py
|
specter01wj/LAB-Lynda
|
1915ada66f4498cdf15a0e2a068c938e325e9ba3
|
[
"MIT"
] | null | null | null |
""" Where's My Mouse? """
import tkinter
root = tkinter.Tk()
root.bind('<Button>', mouse_click)
root.mainloop()
| 22.8
| 39
| 0.599415
|
""" Where's My Mouse? """
import tkinter
def mouse_click(event):
# retrieve XY coords as a tuple
coords = root.winfo_pointerxy()
print('coords: {}'.format(coords))
print('X: {}'.format(coords[0]))
print('Y: {}'.format(coords[1]))
root = tkinter.Tk()
root.bind('<Button>', mouse_click)
root.mainloop()
| 0
| 0
| 0
| 0
| 0
| 194
| 0
| 0
| 25
|
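The event object passed to the handler already carries the click position, so the lookup can also be done without winfo_pointerxy. A minimal variant using the event attributes (widget-relative and screen coordinates) is sketched below; it is an alternative to, not part of, the exercise file above.
""" Variant: read the click position from the event itself. """
import tkinter

def mouse_click(event):
    # event.x / event.y are relative to the widget that was clicked,
    # event.x_root / event.y_root are absolute screen coordinates.
    print('widget coords: ({}, {})'.format(event.x, event.y))
    print('screen coords: ({}, {})'.format(event.x_root, event.y_root))

root = tkinter.Tk()
root.bind('<Button>', mouse_click)
root.mainloop()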
b485f685ca90029c0dd0acd04f32bc0b55820f14
| 2,906
|
py
|
Python
|
examples/fsm/bot/middleware.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 13
|
2021-01-21T12:43:10.000Z
|
2022-03-23T11:11:59.000Z
|
examples/fsm/bot/middleware.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 259
|
2020-02-26T08:51:03.000Z
|
2022-03-23T11:08:36.000Z
|
examples/fsm/bot/middleware.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 5
|
2019-12-02T16:19:22.000Z
|
2021-11-22T20:33:34.000Z
|
from typing import Final
_default_transition: Final = object()
| 32.651685
| 87
| 0.639023
|
from dataclasses import dataclass
from enum import Enum
from typing import Callable, Dict, Final, Optional, Type, Union
from botx import Bot, Collector, Message
from botx.concurrency import callable_to_coroutine
from botx.middlewares.base import BaseMiddleware
from botx.typing import Executor
_default_transition: Final = object()
@dataclass
class Transition:
on_failure: Optional[Union[Enum, object]] = _default_transition
on_success: Optional[Union[Enum, object]] = _default_transition
class FlowError(Exception):
pass
class FSM:
def __init__(self, states: Type[Enum]) -> None:
self.transitions: Dict[Enum, Transition] = {}
self.collector = Collector()
self.states = states
def handler(
self,
on_state: Enum,
next_state: Optional[Union[Enum, object]] = _default_transition,
on_failure: Optional[Union[Enum, object]] = _default_transition,
) -> Callable:
def decorator(handler: Callable) -> Callable:
self.collector.add_handler(
handler,
body=on_state.name,
name=on_state.name,
include_in_status=False,
)
self.transitions[on_state] = Transition(
on_success=next_state, on_failure=on_failure,
)
return handler
return decorator
def change_state(message: Message, new_state: Optional[Enum]) -> None:
message.bot.state.fsm_state[(message.user_huid, message.group_chat_id)] = new_state
class FSMMiddleware(BaseMiddleware):
def __init__(
self,
executor: Executor,
bot: Bot,
fsm: FSM,
initial_state: Optional[Enum] = None,
) -> None:
super().__init__(executor)
bot.state.fsm_state = {}
self.fsm = fsm
self.initial_state = initial_state
for state in self.fsm.states:
# check that for each state there is registered handler
assert state in self.fsm.transitions
async def dispatch(self, message: Message, call_next: Executor) -> None:
current_state: Enum = message.bot.state.fsm_state.setdefault(
(message.user_huid, message.group_chat_id), self.initial_state,
)
if current_state is not None:
transition = self.fsm.transitions[current_state]
handler = self.fsm.collector.handler_for(current_state.name)
try:
await handler(message)
except Exception as exc:
if transition.on_failure is not _default_transition:
change_state(message, transition.on_failure)
raise exc
else:
if transition.on_success is not _default_transition:
change_state(message, transition.on_success)
else:
await callable_to_coroutine(call_next, message)
| 0
| 143
| 858
| 1,314
| 0
| 137
| 0
| 137
| 248
|
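A hypothetical way to wire the FSM above into a bot. The state names and handler bodies are invented, the FSM import is assumed to come from wherever the middleware module lives, and the final middleware registration is only indicated in a comment because it depends on the Bot API in use.
from enum import Enum, auto
from botx import Message
# Assumption: the FSM class defined above is importable from the middleware module.
from bot.middleware import FSM, FSMMiddleware

class RegistrationStates(Enum):
    WAITING_NAME = auto()
    WAITING_EMAIL = auto()

fsm = FSM(states=RegistrationStates)

@fsm.handler(on_state=RegistrationStates.WAITING_NAME,
             next_state=RegistrationStates.WAITING_EMAIL)
async def wait_for_name(message: Message) -> None:
    print("got name:", message.body)

@fsm.handler(on_state=RegistrationStates.WAITING_EMAIL,
             next_state=None)  # None clears the stored state for this chat
async def wait_for_email(message: Message) -> None:
    print("got email:", message.body)

# Registration of the middleware itself depends on the Bot instance, roughly
# (assumption): bot.add_middleware(FSMMiddleware, bot=bot, fsm=fsm,
#                                  initial_state=RegistrationStates.WAITING_NAME)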
42f3981074dbd8b6458eb716c4608442ffca1db6
| 6,411
|
py
|
Python
|
webenmr/lib/convrdc.py
|
andreagia/WEBNMR
|
512a8cc04cf69300796585feae722614501389a9
|
[
"Apache-2.0"
] | null | null | null |
webenmr/lib/convrdc.py
|
andreagia/WEBNMR
|
512a8cc04cf69300796585feae722614501389a9
|
[
"Apache-2.0"
] | null | null | null |
webenmr/lib/convrdc.py
|
andreagia/WEBNMR
|
512a8cc04cf69300796585feae722614501389a9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
This program attempts to convert XPLOR Pseudocontact shift restraints in AMBER format
XPLOR:
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000
AMBER:
&align
num_datasets=2,
dcut= -1.0, freezemol= .false.,
ndip= 10, dwt= 5*0.1, 5*0.1
gigj= 5*-3.1631,5*-3.1631,
dij= 5*1.041,5*1.041,
s11= -4.236,-4.236
s12= 56.860,56.860
s13= -34.696,-34.696
s22= -27.361,-27.361
s23= -12.867,-12.867
dataset=1,
id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13,
id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10,
id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54,
...
...
&end
'''
import os
from optparse import OptionParser
if __name__ == '__main__':
usage = "usage: %prog -w working_directory -p pdb_filename -o out_filename"
parser = OptionParser(usage)
parser.add_option("-w", "--wdir", dest="wd",
help="Working directory", metavar="WORKDIR")
parser.add_option("-p", "--pdbfile", dest="pdbfile",
help="PDB filename", metavar="FILE")
parser.add_option("-o", "--outfile", dest="outfile",
help="Output filename", metavar="FILE")
(options, args) = parser.parse_args()
if not options.wd:
parser.error("Working directory is required")
wd=os.path.abspath(options.wd)+'/'
if options.pdbfile:
pdbfile=os.path.join(wd, options.pdbfile)
else:
parser.error("PDB filename is required")
if options.outfile:
outfile=os.path.join(wd, options.outfile)
else:
parser.error("Output filename is required")
xml_input=os.path.join(wd,'input.xml')
doc = etree.parse(xml_input)
ndoc = etree.tostring(doc)
new=parse_node(etree.fromstring(ndoc))
out=convert(pdbfile, new, wd)
fout=open(outfile,'w')
fout.writelines(out)
fout.close()
| 31.426471
| 154
| 0.511777
|
#!/usr/bin/env python
'''
This program attempts to convert XPLOR Pseudocontact shift restraints in AMBER format
XPLOR:
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000
AMBER:
&align
num_datasets=2,
dcut= -1.0, freezemol= .false.,
ndip= 10, dwt= 5*0.1, 5*0.1
gigj= 5*-3.1631,5*-3.1631,
dij= 5*1.041,5*1.041,
s11= -4.236,-4.236
s12= 56.860,56.860
s13= -34.696,-34.696
s22= -27.361,-27.361
s23= -12.867,-12.867
dataset=1,
id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13,
id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10,
id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54,
...
...
&end
'''
import sys
import os
import commands
from optparse import OptionParser
from xml_parser import *
from normalize_tbl import normalize
from constants import convtable
def searchres(nres, lpdb):
for l in lpdb:
if l.strip().lower().startswith('atom'):
s=l.split()
if int(nres)==int(s[4]):
return s[3]
def searchC(outx):
i=0
c=[]
while i<len(outx):
if outx[i].strip().startswith('XDIPO_RDC>frun'):
while i<len(outx):
i+=1
if i>=len(outx):
break
if outx[i].strip().startswith('C1='):
t=[]
l=outx[i].split()
for x in range(1,len(l),2):
t.append(l[x])
c.append(t)
break
i+=1
return c
def convert(pdb, new, wd):
if new.calculation.protocol.xrdc:
xfiles=[]
if len(new.calculation.protocol.xrdc)==1:
xfiles.append(new.calculation.protocol.xrdc.attrib_.xrdc_file)
else:
for i in range(len(new.calculation.protocol.xrdc)):
xfiles.append(new.calculation.protocol.xrdc[i].attrib_.xrdc_file)
else:
sys.exit('%s: RDC not found\n' % sys.argv[0])
try:
lpdb=open(pdb, 'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, pdb, strerror))
numMap = {}
for l in lpdb:
if l.strip().lower().startswith('atom'):
ls=l.split()
k='%s:%s' % (ls[4],ls[2])
numMap[k]=ls[1]
cmd=' /opt/local_prog/xplor-nih-2.22/bin/xplor tensor.inp'
outx=commands.getoutput(cmd)
outx=outx.split('\n')
#outx=open('xplor.outx').readlines()
c=searchC(outx)
out=[' &align\n']
out.append(' num_datasets=%d,\n' % len(xfiles))
out.append(' dcut=-1.0, freezemol=.false.,\n')
out.append(' ndip=10,')
out.append(' dcut=-1.0,dwt=92*0.1,\n')
out.append(' gigj=92*-3.163,\n')
out.append(' dij=92*1.01,\n')
s11=' s11='
s12=' s12='
s13=' s13='
s22=' s22='
s23=' s23='
for i in range(len(c)):
s11='%s%s,' % (s11, c[i][0])
s12='%s%s,' % (s12, c[i][1])
s13='%s%s,' % (s13, c[i][2])
s22='%s%s,' % (s22, c[i][3])
s23='%s%s,' % (s23, c[i][4])
out.append('%s\n' % s11)
out.append('%s\n' % s12)
out.append('%s\n' % s13)
out.append('%s\n' % s22)
out.append('%s\n' % s23)
counter=0
nrdc=0
for xfile in xfiles:
counter+=1
nxfile=os.path.join(wd, 'rdc_%d_web_enmr_normalized.tbl' % counter)
xfile=os.path.join(wd, xfile)
try:
normalize(xfile, nxfile, new, wd)
except:
sys.exit('%s: unable to normalize %s tbl file\n' % (sys.argv[0], xfile))
try:
xp=open(nxfile,'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, nxfile, strerror))
out.append(' dataset=%d,\n' % counter)
for l in xp:
if l.strip().startswith('assign'):
nrdc+=1
ls=l.split()
res=searchres(ls[31], lpdb)
kk='%s:%s' % (res, ls[34])
if convtable.has_key(kk):
ls[34]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[31], ls[34])
natm1=numMap[k]
res=searchres(ls[38], lpdb)
kk='%s:%s' % (res, ls[41])
if convtable.has_key(kk):
ls[41]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[38], ls[41])
natm2=numMap[k]
out.append(' id(%s)=%s, jd(%s)=%s, dobsl(%s)=%s, dobsu(%s)=%s, \n' %
(nrdc, natm1, nrdc, natm2, nrdc, ls[43], nrdc, ls[43]))
out[3]=' ndip=%d,' % nrdc
out.append(' &end')
return out
if __name__ == '__main__':
usage = "usage: %prog -w working_directory -p pdb_filename -o out_filename"
parser = OptionParser(usage)
parser.add_option("-w", "--wdir", dest="wd",
help="Working directory", metavar="WORKDIR")
parser.add_option("-p", "--pdbfile", dest="pdbfile",
help="PDB filename", metavar="FILE")
parser.add_option("-o", "--outfile", dest="outfile",
help="Output filename", metavar="FILE")
(options, args) = parser.parse_args()
if not options.wd:
parser.error("Working directory is required")
wd=os.path.abspath(options.wd)+'/'
if options.pdbfile:
pdbfile=os.path.join(wd, options.pdbfile)
else:
parser.error("PDB filename is required")
if options.outfile:
outfile=os.path.join(wd, options.outfile)
else:
parser.error("Output filename is required")
xml_input=os.path.join(wd,'input.xml')
doc = etree.parse(xml_input)
ndoc = etree.tostring(doc)
new=parse_node(etree.fromstring(ndoc))
out=convert(pdbfile, new, wd)
fout=open(outfile,'w')
fout.writelines(out)
fout.close()
| 0
| 0
| 0
| 0
| 0
| 3,870
| 0
| 10
| 200
|
1441c3ed71c2dc67d784d782e0dab2d91d827d06
| 2,134
|
py
|
Python
|
lptrack/versions.py
|
gieseladev/lptrack
|
fb4c64021c23522f96733db41ceb69f0ccb9b713
|
[
"MIT"
] | null | null | null |
lptrack/versions.py
|
gieseladev/lptrack
|
fb4c64021c23522f96733db41ceb69f0ccb9b713
|
[
"MIT"
] | null | null | null |
lptrack/versions.py
|
gieseladev/lptrack
|
fb4c64021c23522f96733db41ceb69f0ccb9b713
|
[
"MIT"
] | null | null | null |
"""Versioned body readers and writers for track message bodies.
Attributes:
LATEST_VERSION (int): Latest version supported by the library.
"""
from typing import Callable
from . import TrackInfo, codec
LATEST_VERSION = 2
ReaderType = Callable[[codec.Reader], TrackInfo]
WriterType = Callable[[codec.Writer, TrackInfo], None]
_FORMAT_VERSIONS = {
1: (read_body_v1, write_body_v1),
2: (read_body_v2, write_body_v2),
}
def get_reader(version: int) -> ReaderType:
"""Get a body reader for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[0]
def get_writer(version: int) -> WriterType:
"""Get a body writer for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[1]
| 25.404762
| 83
| 0.698219
|
"""Versioned body readers and writers for track message bodies.
Attributes:
LATEST_VERSION (int): Latest version supported by the library.
"""
from typing import Callable, Tuple
from . import TrackInfo, codec
LATEST_VERSION = 2
def _read_body_v1_2(stream: codec.Reader, version: int) -> TrackInfo:
return TrackInfo(
title=stream.read_utf(),
author=stream.read_utf(),
duration=stream.read_long() / 1000,
identifier=stream.read_utf(),
is_stream=stream.read_bool(),
uri=stream.read_optional_utf() if version >= 2 else None,
)
def read_body_v1(stream: codec.Reader) -> TrackInfo:
return _read_body_v1_2(stream, 1)
def read_body_v2(stream: codec.Reader) -> TrackInfo:
return _read_body_v1_2(stream, 2)
def _write_body_v1_2(stream: codec.Writer, track: TrackInfo, version: int) -> None:
stream.write_utf(track.title)
stream.write_utf(track.author)
stream.write_long(int(track.duration * 1000))
stream.write_utf(track.identifier)
stream.write_bool(track.is_stream)
if version >= 2:
stream.write_optional_utf(track.uri)
def write_body_v1(stream: codec.Writer, track: TrackInfo) -> None:
_write_body_v1_2(stream, track, 1)
def write_body_v2(stream: codec.Writer, track: TrackInfo) -> None:
_write_body_v1_2(stream, track, 2)
ReaderType = Callable[[codec.Reader], TrackInfo]
WriterType = Callable[[codec.Writer, TrackInfo], None]
_FORMAT_VERSIONS = {
1: (read_body_v1, write_body_v1),
2: (read_body_v2, write_body_v2),
}
def _get_format(version: int) -> Tuple:
try:
return _FORMAT_VERSIONS[version]
except KeyError:
raise ValueError(f"Unsupported version: {version}") from None
def get_reader(version: int) -> ReaderType:
"""Get a body reader for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[0]
def get_writer(version: int) -> WriterType:
"""Get a body writer for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[1]
| 0
| 0
| 0
| 0
| 0
| 1,120
| 0
| 7
| 161
|
45b20d04060d1b766f35010e3ce9fedfd6a34eba
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/toml/__init__.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/toml/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/toml/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/f3/de/85/7dca1e096a43e00e6ff1ca900dda1ca91c8c5c3a1d6798e466a9173a00
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/f3/de/85/7dca1e096a43e00e6ff1ca900dda1ca91c8c5c3a1d6798e466a9173a00
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4053282fdcb4c61c6094cfb3f6a832822c2a096c
| 2,371
|
py
|
Python
|
venv/lib/python2.7/site-packages/cement/ext/ext_alarm.py
|
zwachtel11/fruitful-backend
|
45b8994917182e7b684b9e25944cc79c9494c9f3
|
[
"MIT"
] | 95
|
2018-06-05T10:49:32.000Z
|
2019-12-31T11:07:36.000Z
|
v_env/lib/python3.7/site-packages/cement/ext/ext_alarm.py
|
buds-lab/expanded-psychrometric-chart
|
e7267f57584d8ba645507189ea4a8e474c67e0de
|
[
"MIT"
] | 51
|
2019-10-08T01:53:02.000Z
|
2021-06-04T22:02:21.000Z
|
v_env/lib/python3.7/site-packages/cement/ext/ext_alarm.py
|
buds-lab/expanded-psychrometric-chart
|
e7267f57584d8ba645507189ea4a8e474c67e0de
|
[
"MIT"
] | 29
|
2018-09-17T06:10:32.000Z
|
2022-03-19T13:15:30.000Z
|
"""
The Alarm Extension provides easy access to setting an application alarm to
handle timing out operations. See the
`Python Signal Library <https://docs.python.org/3.5/library/signal.html>`_.
Requirements
------------
* No external dependencies.
* Only available on Unix/Linux
Configuration
-------------
This extension does not honor any application configuration settings.
Usage
-----
.. code-block:: python
import time
from cement.core.foundation import CementApp
from cement.core.exc import CaughtSignal
class MyApp(CementApp):
class Meta:
label = 'myapp'
exit_on_close = True
extensions = ['alarm']
with MyApp() as app:
try:
app.run()
app.alarm.set(3, "The operation timed out after 3 seconds!")
# do something that takes time to operate
time.sleep(5)
app.alarm.stop()
except CaughtSignal as e:
print(e.msg)
app.exit_code = 1
Looks like:
.. code-block:: console
$ python myapp.py
ERROR: The operation timed out after 3 seconds!
Caught signal 14
"""
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
| 22.158879
| 75
| 0.619148
|
"""
The Alarm Extension provides easy access to setting an application alarm to
handle timing out operations. See the
`Python Signal Library <https://docs.python.org/3.5/library/signal.html>`_.
Requirements
------------
* No external dependencies.
* Only available on Unix/Linux
Configuration
-------------
This extension does not honor any application configuration settings.
Usage
-----
.. code-block:: python
import time
from cement.core.foundation import CementApp
from cement.core.exc import CaughtSignal
class MyApp(CementApp):
class Meta:
label = 'myapp'
exit_on_close = True
extensions = ['alarm']
with MyApp() as app:
try:
app.run()
app.alarm.set(3, "The operation timed out after 3 seconds!")
# do something that takes time to operate
time.sleep(5)
app.alarm.stop()
except CaughtSignal as e:
print(e.msg)
app.exit_code = 1
Looks like:
.. code-block:: console
$ python myapp.py
ERROR: The operation timed out after 3 seconds!
Caught signal 14
"""
import signal
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
def alarm_handler(app, signum, frame):
if signum == signal.SIGALRM:
app.log.error(app.alarm.msg)
class AlarmManager(object):
"""
Lets the developer easily set and stop an alarm. If the
alarm exceeds the given time it will raise ``signal.SIGALRM``.
"""
def __init__(self, *args, **kw):
super(AlarmManager, self).__init__(*args, **kw)
self.msg = None
def set(self, time, msg):
"""
Set the application alarm to ``time`` seconds. If the time is
exceeded ``signal.SIGALRM`` is raised.
:param time: The time in seconds to set the alarm to.
:param msg: The message to display if the alarm is triggered.
"""
LOG.debug('setting application alarm for %s seconds' % time)
self.msg = msg
signal.alarm(int(time))
def stop(self):
"""
Stop the application alarm.
"""
LOG.debug('stopping application alarm')
signal.alarm(0)
def load(app):
app.catch_signal(signal.SIGALRM)
app.extend('alarm', AlarmManager())
app.hook.register('signal', alarm_handler)
| 0
| 0
| 0
| 853
| 0
| 204
| 0
| -8
| 92
|
77ab3b36a849175fa4c24f12a76941077ea58584
| 570
|
py
|
Python
|
scripts/docker/migrate.py
|
guligon90/uac-registry
|
cb5afe941919c2d9ceffa8d8bf220613b7a20613
|
[
"MIT"
] | null | null | null |
scripts/docker/migrate.py
|
guligon90/uac-registry
|
cb5afe941919c2d9ceffa8d8bf220613b7a20613
|
[
"MIT"
] | null | null | null |
scripts/docker/migrate.py
|
guligon90/uac-registry
|
cb5afe941919c2d9ceffa8d8bf220613b7a20613
|
[
"MIT"
] | null | null | null |
# Base imports
# Project imports
| 31.666667
| 82
| 0.670175
|
# Base imports
import subprocess
from typing import Iterable, Optional
# Project imports
from docker import common
from docker.run import run
def migrate(arguments: Iterable[str], deps: Optional[bool] = True) -> int:
print(">>>>>>>>>> Running database migration <<<<<<<<<<")
run(['backend', 'python3', common.MANAGE_PY, 'migrate'], deps)
def make_migrations(arguments: Iterable[str], deps: Optional[bool] = True) -> int:
print(">>>>>>>>>> Running database migration <<<<<<<<<<")
run(['backend', 'python3', common.MANAGE_PY, 'makemigrations'], deps)
| 0
| 0
| 0
| 0
| 0
| 379
| 0
| 21
| 134
|
f979d82751598eba221d7677df764b4451b8c896
| 971
|
py
|
Python
|
adw_test/make_small_dataset.py
|
clinfo/DeepKF
|
ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1
|
[
"MIT"
] | 5
|
2019-12-19T13:33:36.000Z
|
2021-06-01T06:08:16.000Z
|
adw_test/make_small_dataset.py
|
clinfo/DeepKF
|
ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1
|
[
"MIT"
] | 24
|
2020-03-03T19:40:55.000Z
|
2021-05-26T15:27:38.000Z
|
adw_test/make_small_dataset.py
|
clinfo/DeepKF
|
ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1
|
[
"MIT"
] | 1
|
2019-12-19T13:35:07.000Z
|
2019-12-19T13:35:07.000Z
|
import json
import glob
import numpy as np
import os
path = "data_state_space_v3/"
out_path = "small_data/"
files = glob.glob(path + "*.npy") #
train_data_num = 100
test_data_num = 10
train_data = {}
test_data = {}
for filename in files:
obj = np.load(filename)
if filename.find("_test.npy") >= 0:
test_data[filename] = obj
else:
train_data[filename] = obj
os.makedirs(out_path, exist_ok=True)
for k, v in train_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:train_data_num]
np.save(out_path + b, o)
for k, v in test_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:test_data_num]
np.save(out_path + b, o)
fp = open(path + "pack_selected_info.json")
obj = json.load(fp)
obj["pid_list_train"] = obj["pid_list_train"][:train_data_num]
obj["pid_list_test"] = obj["pid_list_test"][:test_data_num]
fp = open(out_path + "pack_selected_info.json", "w")
json.dump(obj, fp)
| 26.243243
| 62
| 0.669413
|
import json
import glob
import numpy as np
import os
path = "data_state_space_v3/"
out_path = "small_data/"
files = glob.glob(path + "*.npy") # ワイルドカードが使用可能
train_data_num = 100
test_data_num = 10
train_data = {}
test_data = {}
for filename in files:
obj = np.load(filename)
if filename.find("_test.npy") >= 0:
test_data[filename] = obj
else:
train_data[filename] = obj
os.makedirs(out_path, exist_ok=True)
for k, v in train_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:train_data_num]
np.save(out_path + b, o)
for k, v in test_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:test_data_num]
np.save(out_path + b, o)
fp = open(path + "pack_selected_info.json")
obj = json.load(fp)
obj["pid_list_train"] = obj["pid_list_train"][:train_data_num]
obj["pid_list_test"] = obj["pid_list_test"][:test_data_num]
fp = open(out_path + "pack_selected_info.json", "w")
json.dump(obj, fp)
| 36
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
991fa516fb5524187777ee16359f8b1f0cb6ad59
| 859
|
py
|
Python
|
3M/W9/7.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 2
|
2021-11-25T13:38:36.000Z
|
2021-11-25T13:42:56.000Z
|
3M/W9/7.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | null | null | null |
3M/W9/7.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 1
|
2021-11-25T13:38:43.000Z
|
2021-11-25T13:38:43.000Z
|
# https://www.hackerrank.com/challenges/three-month-preparation-kit-maxsubarray/problem
#!/bin/python3
import os
#
# Complete the 'maxSubarray' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input().strip())
for t_itr in range(t):
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = maxSubarray(arr)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 21.475
| 87
| 0.615832
|
# https://www.hackerrank.com/challenges/three-month-preparation-kit-maxsubarray/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'maxSubarray' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def maxSubarray(arr):
p = max(0,arr[0])
l = e = m = arr[0]
for z in arr[1:]:
e,m,l,p = max(z,e+z),max(m,max(z,e+z)),max(l,z),max(0,z)+p
return m,l if(l<0) else p
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input().strip())
for t_itr in range(t):
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = maxSubarray(arr)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 0
| 0
| 0
| 0
| 0
| 164
| 0
| -41
| 112
|
End of preview.
This dataset is derived from bigcode/the-stack-dedup with several filters applied. The filters applied to produce this dataset are listed below; an illustrative sketch of what such filters might look like follows the list.
- remove_non_ascii
- remove_decorators
- remove_async
- remove_classes
- remove_generators
- remove_function_no_docstring
- remove_class_no_docstring
- remove_unused_imports
- remove_delete_markers
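
The card does not include the filter implementations themselves, so the snippet below is only a hedged sketch of what filters with these names might do; it is an illustration under stated assumptions, not the code actually used to build the dataset. Both `remove_non_ascii` and `remove_unused_imports` here are hypothetical reconstructions.

```python
# Illustrative sketch only: the dataset card does not ship the real filter code,
# so these functions are assumptions about what filters with these names might do.
import ast


def remove_non_ascii(source: str) -> str:
    # Drop every non-ASCII character from the source text.
    return source.encode("ascii", errors="ignore").decode("ascii")


def remove_unused_imports(source: str) -> str:
    # Drop top-level import statements whose bound names are never referenced
    # as plain names elsewhere in the module (a rough heuristic, not exhaustive).
    tree = ast.parse(source)
    used = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}
    drop = set()
    for node in tree.body:
        if isinstance(node, ast.Import):
            bound = [alias.asname or alias.name.split(".")[0] for alias in node.names]
        elif isinstance(node, ast.ImportFrom):
            bound = [alias.asname or alias.name for alias in node.names]
        else:
            continue
        if not any(name in used for name in bound):
            drop.update(range(node.lineno, node.end_lineno + 1))
    lines = source.splitlines()
    return "\n".join(line for i, line in enumerate(lines, start=1) if i not in drop)


if __name__ == "__main__":
    sample = "import os\nimport sys\n\nprint(sys.argv)\n"
    print(remove_unused_imports(sample))  # the unused "import os" line is dropped
```

A real pipeline would presumably chain all nine filters and record per-row bookkeeping for each one; the `filtered:*` columns in the preview suggest such per-filter accounting, though the card does not specify their exact meaning.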
Downloads last month: 7
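
For readers who want to browse beyond the preview programmatically, here is a minimal sketch using the Hugging Face `datasets` library. The repository ID `your-namespace/the-stack-dedup-filtered` is a placeholder (the card does not state the actual hub ID), and the `content` / `original_content` column names are assumed to match the preview.

```python
# Minimal sketch, assuming this dataset is published on the Hugging Face Hub.
# "your-namespace/the-stack-dedup-filtered" is a placeholder repository ID:
# substitute the real one. Column names are assumed to match the preview.
from datasets import load_dataset

ds = load_dataset("your-namespace/the-stack-dedup-filtered",
                  split="train", streaming=True)

# Compare the filtered content against the original for a few rows.
for row in ds.take(3):
    delta = len(row["original_content"]) - len(row["content"])
    print(f"{row['max_stars_repo_path']}: {delta} characters removed by filtering")
```

Streaming avoids downloading the full dataset just to inspect a handful of rows, which is convenient for a corpus of this size.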