Dataset schema: text (string, 12 to 1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars)
#!/usr/bin/env python3
r"""
Use robot framework API to extract test result data from output.xml generated
by robot tests. For more information on the Robot Framework API, see
http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html
"""
import sys
import os
import getopt
import csv
import robot.errors
import re
import stat
import datetime
from robot.api import ExecutionResult
from robot.result.visitor import ResultVisitor
from xml.etree import ElementTree
# Remove the python library path to restore with local project path later.
save_path_0 = sys.path[0]
del sys.path[0]
sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
from gen_arg import *
from gen_print import *
from gen_valid import *
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
this_program = sys.argv[0]
info = " For more information: " + this_program + ' -h'
if len(sys.argv) == 1:
print(info)
sys.exit(1)
parser = argparse.ArgumentParser(
usage=info,
description="%(prog)s uses a robot framework API to extract test result\
data from output.xml generated by robot tests. For more information on the\
Robot Framework API, see\
http://robot-framework.readthedocs.io/en/3.0/autodoc/robot.result.html",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
parser.add_argument(
'--source',
'-s',
help='The output.xml robot test result file path. This parameter is \
required.')
parser.add_argument(
'--dest',
'-d',
help='The directory path where the generated .csv files will go. This \
parameter is required.')
parser.add_argument(
'--version_id',
help='Driver version of openbmc firmware which was used during test,\
e.g. "v2.1-215-g6e7eacb". This parameter is required.')
parser.add_argument(
'--platform',
help='OpenBMC platform which was used during test,\
e.g. "Witherspoon". This parameter is required.')
parser.add_argument(
'--level',
help='OpenBMC release level which was used during test,\
e.g. "Master", "OBMC920". This parameter is required.')
parser.add_argument(
'--test_phase',
help='Name of testing phase, e.g. "DVT", "SVT", etc.\
This parameter is optional.',
default="FVT")
parser.add_argument(
'--subsystem',
help='Name of the subsystem, e.g. "OPENBMC" etc.\
This parameter is optional.',
default="OPENBMC")
parser.add_argument(
'--processor',
help='Name of processor, e.g. "P9". This parameter is optional.',
default="OPENPOWER")
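# A sketch of a typical invocation, assuming robot results are already
# available on disk (paths below are illustrative placeholders; the version,
# platform and level values are the examples given in the help text above):
#
#   ./gen_csv_results.py --source <results_dir>/output.xml \
#       --dest <csv_dir>/ --version_id "v2.1-215-g6e7eacb" \
#       --platform "Witherspoon" --level "Master"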
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
def exit_function(signal_number=0,
frame=None):
r"""
Execute whenever the program ends normally or with the signals that we
catch (i.e. TERM, INT).
"""
dprint_executing()
dprint_var(signal_number)
qprint_pgm_footer()
def signal_handler(signal_number,
frame):
r"""
Handle signals. Without a function to catch a SIGTERM or SIGINT, the
program would terminate immediately with return code 143 and without
calling the exit_function.
"""
# Our convention is to set up exit_function with atexit.register() so
# there is no need to explicitly call exit_function from here.
dprint_executing()
# Calling exit prevents us from returning to the code that was running
# when the signal was received.
exit(0)
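# For reference, a minimal standalone sketch of the wiring that the shared
# libraries are assumed to perform (the actual registration happens inside
# gen_post_validation(), called from validate_parms() below); kept as
# comments so it is not executed twice:
#
#   import atexit
#   import signal
#   atexit.register(exit_function)
#   signal.signal(signal.SIGTERM, signal_handler)
#   signal.signal(signal.SIGINT, signal_handler)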
def validate_parms():
r"""
Validate program parameters, etc. Return True or False (i.e. pass/fail)
accordingly.
"""
if not valid_file_path(source):
return False
if not valid_dir_path(dest):
return False
gen_post_validation(exit_function, signal_handler)
return True
def parse_output_xml(xml_file_path, csv_dir_path, version_id, platform, level,
test_phase, processor):
r"""
Parse the robot-generated output.xml file and extract various test
output data. Put the extracted information into a csv file in the "dest"
folder.
Description of argument(s):
xml_file_path The path to a Robot-generated output.xml
file.
csv_dir_path The path to the directory that is to
contain the .csv files generated by
this function.
version_id Version of the openbmc firmware
(e.g. "v2.1-215-g6e7eacb").
platform Platform of the openbmc system.
    level                       Release level of the OpenBMC system
                                (e.g. "Master").
    test_phase                  Name of the testing phase (e.g. "DVT",
                                "SVT").
    processor                   Name of the processor (e.g. "P9").
    """
# Initialize tallies
total_critical_tc = 0
total_critical_passed = 0
total_critical_failed = 0
total_non_critical_tc = 0
total_non_critical_passed = 0
total_non_critical_failed = 0
result = ExecutionResult(xml_file_path)
result.configure(stat_config={'suite_stat_level': 2,
'tag_stat_combine': 'tagANDanother'})
stats = result.statistics
print("--------------------------------------")
try:
total_critical_tc = stats.total.critical.passed + stats.total.critical.failed
total_critical_passed = stats.total.critical.passed
total_critical_failed = stats.total.critical.failed
except AttributeError:
pass
try:
total_non_critical_tc = stats.total.passed + stats.total.failed
total_non_critical_passed = stats.total.passed
total_non_critical_failed = stats.total.failed
except AttributeError:
pass
print("Total Test Count:\t %d" % (total_non_critical_tc + total_critical_tc))
print("Total Critical Test Failed:\t %d" % total_critical_failed)
print("Total Critical Test Passed:\t %d" % total_critical_passed)
print("Total Non-Critical Test Failed:\t %d" % total_non_critical_failed)
print("Total Non-Critical Test Passed:\t %d" % total_non_critical_passed)
print("Test Start Time:\t %s" % result.suite.starttime)
print("Test End Time:\t\t %s" % result.suite.endtime)
print("--------------------------------------")
# Use ResultVisitor object and save off the test data info
class TestResult(ResultVisitor):
def __init__(self):
self.testData = []
def visit_test(self, test):
self.testData += [test]
collectDataObj = TestResult()
result.visit(collectDataObj)
# Write the result statistics attributes to CSV file
l_csvlist = []
# Default Test data
l_test_type = test_phase
l_pse_rel = 'Master'
if level:
l_pse_rel = level
l_env = 'HW'
l_proc = processor
l_platform_type = ""
l_func_area = ""
# System data from XML meta data
# l_system_info = get_system_details(xml_file_path)
    # First, try to collect the information from the command-line parameters.
    # If both values are not provided there, fall back to the output.xml file.
if version_id and platform:
l_driver = version_id
l_platform_type = platform
print("BMC Version_id:%s" % version_id)
print("BMC Platform:%s" % platform)
else:
# System data from XML meta data
l_system_info = get_system_details(xml_file_path)
l_driver = l_system_info[0]
l_platform_type = l_system_info[1]
    # Both the driver version id and the platform are required for CSV file
    # generation. If either one is not available, exit the CSV file generation
    # process.
if l_driver and l_platform_type:
print("Driver and system info set.")
else:
print("Both driver and system info need to be set.\
CSV file is not generated.")
sys.exit()
# Default header
l_header = ['test_start', 'test_end', 'subsys', 'test_type',
'test_result', 'test_name', 'pse_rel', 'driver',
'env', 'proc', 'platform_type', 'test_func_area']
l_csvlist.append(l_header)
# Generate CSV file onto the path with current time stamp
l_base_dir = csv_dir_path
l_timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
# Example: 2017-02-20-08-47-22_Witherspoon.csv
l_csvfile = l_base_dir + l_timestamp + "_" + l_platform_type + ".csv"
print("Writing data into csv file:%s" % l_csvfile)
for testcase in collectDataObj.testData:
# Functional Area: Suite Name
# Test Name: Test Case Name
l_func_area = str(testcase.parent).split(' ', 1)[1]
l_test_name = str(testcase)
# Test Result pass=0 fail=1
if testcase.status == 'PASS':
l_test_result = 0
else:
l_test_result = 1
# Format datetime from robot output.xml to "%Y-%m-%d-%H-%M-%S"
l_stime = xml_to_csv_time(testcase.starttime)
l_etime = xml_to_csv_time(testcase.endtime)
# Data Sequence: test_start,test_end,subsys,test_type,
# test_result,test_name,pse_rel,driver,
# env,proc,platform_type,test_func_area,
l_data = [l_stime, l_etime, subsystem, l_test_type, l_test_result,
l_test_name, l_pse_rel, l_driver, l_env, l_proc,
l_platform_type, l_func_area]
l_csvlist.append(l_data)
# Open the file and write to the CSV file
l_file = open(l_csvfile, "w")
l_writer = csv.writer(l_file, lineterminator='\n')
l_writer.writerows(l_csvlist)
l_file.close()
# Set file permissions 666.
perm = stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP + stat.S_IWGRP + stat.S_IROTH + stat.S_IWOTH
os.chmod(l_csvfile, perm)
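# Illustrative example of one generated data row (placeholder values that
# follow the header and defaults above, not output from a real run):
#
#   2017-02-06-05-05-19,2017-02-06-05-06-10,OPENBMC,FVT,0,<test name>,
#   Master,v2.1-215-g6e7eacb,HW,OPENPOWER,Witherspoon,<suite name>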
def xml_to_csv_time(xml_datetime):
r"""
Convert the time from %Y%m%d %H:%M:%S.%f format to %Y-%m-%d-%H-%M-%S format
and return it.
Description of argument(s):
    xml_datetime                The date in the following format: %Y%m%d
%H:%M:%S.%f (This is the format
typically found in an XML file.)
The date returned will be in the following format: %Y-%m-%d-%H-%M-%S
"""
# 20170206 05:05:19.342
l_str = datetime.datetime.strptime(xml_datetime, "%Y%m%d %H:%M:%S.%f")
# 2017-02-06-05-05-19
l_str = l_str.strftime("%Y-%m-%d-%H-%M-%S")
return str(l_str)
def get_system_details(xml_file_path):
r"""
Get the system data from output.xml generated by robot and return it.
The list returned will be in the following order: [driver,platform]
Description of argument(s):
xml_file_path The relative or absolute path to the
output.xml file.
"""
bmc_version_id = ""
bmc_platform = ""
with open(xml_file_path, 'rt') as output:
tree = ElementTree.parse(output)
for node in tree.iter('msg'):
# /etc/os-release output is logged in the XML as msg
# Example: ${output} = VERSION_ID="v1.99.2-71-gbc49f79"
if '${output} = VERSION_ID=' in node.text:
# Get BMC version (e.g. v1.99.1-96-g2a46570)
bmc_version_id = str(node.text.split("VERSION_ID=")[1])[1:-1]
# Platform is logged in the XML as msg.
# Example: ${bmc_model} = Witherspoon BMC
if '${bmc_model} = ' in node.text:
bmc_platform = node.text.split(" = ")[1]
print_vars(bmc_version_id, bmc_platform)
return [str(bmc_version_id), str(bmc_platform)]
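# Rough usage sketch; the return values mirror the msg examples above and
# are illustrative only:
#
#   driver, platform = get_system_details("output.xml")
#   # driver   -> "v1.99.2-71-gbc49f79"
#   # platform -> "Witherspoon BMC"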
def main():
if not gen_get_options(parser, stock_list):
return False
if not validate_parms():
return False
qprint_pgm_header()
parse_output_xml(source, dest, version_id, platform, level,
test_phase, processor)
return True
# Main
if not main():
exit(1)
repo_name: openbmc/openbmc-test-automation | path: tools/ct_metrics/gen_csv_results.py | language: Python | license: apache-2.0 | size: 11,930 | keyword: ["VisIt"] | text_hash: cd88005dfe6fd8e756608229c1932bae6eb4ea7552705fdd287236e32117b91f
#! /usr/bin/env python
import simulation
import unittest
import numpy as np
import copy
from basic_defs import *
def int_r(f):
""" Convert to nearest integer. """
return int(np.round(f))
# a simple mock source layer
class SimpleNeurons(object):
def __init__(self, N, out_step=1, out_fct=None):
self.N = N
self.out_step = out_step
self.out_fct = out_fct
def prepare(self, t_max, dt):
if self.out_fct is None:
if np.size(self.out_step) == 1:
self.out = self.out_step*np.ones(self.N)
else:
self.out = np.copy(self.out_step)
else:
self.out = self.out_fct(0)
self.i = 0
def evolve(self, t, dt):
if self.out_fct is None:
self.out += self.out_step
else:
self.out = self.out_fct(self.i)
self.i += 1
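# Note: the tests below drive SimpleNeurons either with a constant per-step
# increment (e.g. `layer.out_step = np.ones(N)`) or with a per-step function
# (e.g. `layer.out_fct = lambda i: sequence[i]`); both patterns appear
# verbatim in the LinearController and plasticity tests further down.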
##################
# TestSimulation #
##################
class TestSimulation(unittest.TestCase):
def test_creation(self):
""" Create a simulation. """
sim = simulation.Simulation(1, 2, [1, 2])
self.assertSequenceEqual(sim.agents, [1, 2, [1, 2]])
def test_number_of_steps(self):
""" Test the number of steps executed upon run. """
class Mock(object):
def __init__(self):
self.count = 0
def evolve(self, t, dt):
self.count += 1
G = Mock()
sim = simulation.Simulation(G, dt=0.1)
sim.run(100.0)
self.assertEqual(G.count, 1000)
G = Mock()
sim = simulation.Simulation(G, dt=0.2)
sim.run(100.2)
self.assertEqual(G.count, 501)
def test_prepare_on_run(self):
""" Ensure that the agents' `prepare` method is called on `run()`. """
class Mock(object):
def __init__(self):
self.t_max = None
self.dt = None
def evolve(self, t, dt):
pass
def prepare(self, t_max, dt):
self.t_max = t_max
self.dt = dt
t_max = 10.0
dt = 0.2
G = Mock()
sim = simulation.Simulation(G, dt=dt)
self.assertIsNone(G.t_max)
self.assertIsNone(G.dt)
sim.run(t_max)
self.assertEqual(G.t_max, t_max)
self.assertEqual(G.dt, dt)
def test_evolution(self):
""" Test time-evolution in a simulation. """
class Mock(object):
def __init__(self):
self.t = 0.0
def evolve(self1, t, dt):
self.assertEqual(self1.t, t)
self1.t += dt
G1 = Mock()
G2 = Mock()
sim = simulation.Simulation(G1, G2)
sim.run(100.0)
self.assertAlmostEqual(G1.t, 100.0)
def test_order(self):
""" Test that the agents are evolved in the correct order. """
n = 3
was_called = n*[False]
class Mock(object):
def __init__(self, i):
self.i = i
def evolve(self1, t, dt):
was_called[self1.i] = True
self.assertTrue(all(was_called[:self1.i]))
sim = simulation.Simulation(*[Mock(_) for _ in xrange(n)])
sim.run(sim.dt)
def test_custom_order_exec(self):
""" Test that the execution order of the agents can be modified. """
self.call_idx = 0
class Mock(object):
def __init__(self, order):
self.order = order
self.execd = -1
def evolve(self1, t, dt):
self1.execd = self.call_idx
self.call_idx += 1
G1 = Mock(3)
G2 = Mock(-1)
G3 = Mock(2)
sim = simulation.Simulation(G1, G2, G3)
sim.run(sim.dt)
self.assertEqual(G1.execd, 2)
self.assertEqual(G2.execd, 0)
self.assertEqual(G3.execd, 1)
def test_custom_order_prepare(self):
""" Test that the preparation order of the agents can be modified. """
self.call_idx = 0
class Mock(object):
def __init__(self, order):
self.order = order
self.execd = -1
def evolve(self, t, dt):
pass
def prepare(self1, t_max, dt):
self1.execd = self.call_idx
self.call_idx += 1
G1 = Mock(1)
G2 = Mock(10)
G3 = Mock(-5)
sim = simulation.Simulation(G1, G2, G3)
sim.run(sim.dt)
self.assertEqual(G1.execd, 1)
self.assertEqual(G2.execd, 2)
self.assertEqual(G3.execd, 0)
def test_timestep(self):
""" Test changing the simulation time step. """
class Mock(object):
def __init__(self):
self.t = 0.0
self.dt = None
def evolve(self1, t, dt):
if self1.dt is not None:
self.assertAlmostEqual(self1.dt, dt)
else:
self1.dt = dt
self.assertAlmostEqual(self1.t, t)
self1.t += self1.dt
t_max = 10.0
dt = 0.2
G = Mock()
simulation.Simulation(G, dt=dt).run(t_max)
self.assertAlmostEqual(G.dt, dt)
####################
# TestEventMonitor #
####################
class TestEventMonitor(unittest.TestCase):
def setUp(self):
# generate pseudo-random test case
np.random.seed(123456)
class Spiker(object):
def __init__(self, pattern):
self.N = pattern.shape[0]
self.pattern = pattern
self.i = 0
self.spike = np.zeros(self.N, dtype=bool)
self.other = np.zeros(self.N, dtype=bool)
def evolve(self, t, dt):
self.spike = self.pattern[:, self.i]
self.other = ~self.pattern[:, self.i]
self.i += 1
self.t_max = 16.0 # duration of simulation
self.dt = 2.0 # time step
self.N = 15 # number of units
self.p = 0.2 # probability of spiking
self.G = Spiker(np.random.rand(self.N, int_r(self.t_max/self.dt)) < self.p)
def test_init(self):
""" Test that monitor is properly initialized. """
M = simulation.EventMonitor(self.G)
self.assertTrue(hasattr(M, 't'))
self.assertTrue(hasattr(M, 'i'))
self.assertEqual(len(M.t), 0)
self.assertEqual(len(M.i), 0)
def test_shape(self):
""" Test that time and index vectors have matching lengths. """
M = simulation.EventMonitor(self.G)
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
self.assertEqual(len(M.t), len(M.i))
def test_spike_order(self):
""" Test that the spike times are ordered in ascending order. """
M = simulation.EventMonitor(self.G)
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
self.assertNotEqual(len(M.t), 0)
self.assertTrue(all(M.t[i] <= M.t[i+1] for i in xrange(len(M.t) - 1)))
def test_order(self):
""" Test that by default the monitor is called after its target. """
class Mock(object):
def __init__(self):
self.spike = [False]
def evolve(self, t, dt):
self.spike = [True]
G0 = Mock()
M0 = simulation.EventMonitor(G0)
sim = simulation.Simulation(G0, M0)
sim.run(sim.dt)
self.assertEqual(len(M0.t), 1)
self.assertEqual(len(M0.i), 1)
def test_accurate(self):
""" Test that all the events are properly stored. """
M = simulation.EventMonitor(self.G)
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
times = self.G.pattern.nonzero()[1]*self.dt
self.assertTrue(np.allclose(sorted(times), M.t))
for (i, t) in zip(M.i, M.t):
self.assertTrue(self.G.pattern[i, int_r(t/self.dt)])
def test_other_event(self):
""" Test following a different event. """
M = simulation.EventMonitor(self.G, event='other')
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
times = (~self.G.pattern).nonzero()[1]*self.dt
self.assertTrue(np.allclose(sorted(times), M.t))
for (i, t) in zip(M.i, M.t):
self.assertFalse(self.G.pattern[i, int_r(t/self.dt)])
####################
# TestStateMonitor #
####################
class TestStateMonitor(unittest.TestCase):
def setUp(self):
# make something to track
class Mock(object):
def __init__(self, N):
self.N = N
self.v = np.zeros(N)
self.a = np.zeros(2)
self.b = np.zeros(N, dtype=bool)
self.f = 0.0
def evolve(self, t, dt):
self.v += np.arange(self.N)*dt
self.a += dt
self.b = ((np.arange(self.N) + int_r(t/dt)) % 3 ==0)
self.f = t
self.N = 15
self.t_max = 10.0
self.dt = 0.1
self.G = Mock(self.N)
def test_time(self):
""" Test that time is stored properly. """
M = simulation.StateMonitor(self.G, 'v')
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))
def test_custom_time(self):
""" Test that time is stored properly with custom interval. """
interval = 0.5
M = simulation.StateMonitor(self.G, 'v', interval=interval)
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, interval)))
def test_shape(self):
""" Test that the shape of the data storage is correct. """
M = simulation.StateMonitor(self.G, ['a', 'v'])
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
nsteps = int_r(self.t_max/self.dt)
self.assertEqual(M.v.shape, (self.N, nsteps))
self.assertEqual(M.a.shape, (2, nsteps))
def test_shape_interval(self):
""" Test correct shape with custom interval. """
interval = 0.5
M = simulation.StateMonitor(self.G, ['a', 'v', 'b'], interval=interval)
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
nsteps = int_r(self.t_max/interval)
self.assertEqual(M.v.shape, (self.N, nsteps))
self.assertEqual(M.a.shape, (2, nsteps))
self.assertEqual(M.b.shape, (self.N, nsteps))
def test_1d_function(self):
""" Test storage for 1d function. """
M = simulation.StateMonitor(self.G, 'f')
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
self.assertTrue(np.allclose(M.f, M.t))
def test_accurate(self):
""" Test accuracy of storage. """
M = simulation.StateMonitor(self.G, ['v', 'a', 'b'])
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
v_expected = np.array([i*(M.t+self.dt) for i in xrange(self.N)])
a_expected = np.array([(M.t+self.dt) for i in xrange(2)])
b_expected = [((i + np.round(M.t/sim.dt)).astype(int) % 3) == 0
for i in xrange(self.N)]
self.assertTrue(np.allclose(M.v, v_expected))
self.assertTrue(np.allclose(M.a, a_expected))
self.assertTrue(np.allclose(M.b, b_expected))
def test_accurate_interval(self):
""" Test that storage is accurate with custom interval. """
interval = 0.5
M = simulation.StateMonitor(self.G, 'v', interval=interval)
sim = simulation.Simulation(self.G, M, dt=self.dt)
sim.run(self.t_max)
v_expected = np.array([i*(M.t + self.dt) for i in xrange(self.N)])
self.assertTrue(np.allclose(M.v, v_expected))
####################
# TestTableSpikers #
####################
class TestTableSpikers(unittest.TestCase):
def test_correct_spiking(self):
""" Test that spiking happens when it should. """
n = 10
t_max = 25.0
dt = 0.2
p = 0.05
# some reproducible arbitrariness
np.random.seed(622312)
n_steps = int_r(t_max/dt)
table = np.random.rand(n_steps, n) < p
G = TableSpikers(n)
G.spike_table = copy.copy(table)
class SimpleMonitor(object):
def __init__(self, target):
self.target = target;
self.results = []
self.order = 1
def evolve(self, t, dt):
idxs = self.target.spike.nonzero()[0]
self.results.extend([(int_r(t/dt), i) for i in idxs])
M = SimpleMonitor(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
expected = zip(*table.nonzero())
self.assertSequenceEqual(expected, M.results)
def test_no_spike_after_table(self):
""" Test that there are no more spikes past the end of the table. """
n = 5
dt = 1.0
t_max = 2*dt
# make sure we have spikes at the end
table = np.ones((1, n))
G = TableSpikers(n)
G.spike_table = table
sim = simulation.Simulation(G, dt=dt)
sim.run(t_max)
self.assertFalse(np.any(G.spike))
def test_out(self):
""" Test generation of output field. """
t_max = 24.0
dt = 0.1
n_steps = int_r(t_max/dt)
spike_t = 5.0
spike_n = int_r(spike_t/dt)
G = TableSpikers(1)
table = np.zeros((n_steps, 1))
table[spike_n, 0] = True
G.spike_table = table
Mo = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, Mo, dt=dt)
sim.run(t_max)
mask = (Mo.t > spike_t)
out_t = Mo.t[mask]
out_y = Mo.out[0, mask]
# there is a timing shift here between the actual output and the "expected"
# one; I don't think this is an issue
expected = out_y[0]*np.power(1 - dt/G.tau_out, (out_t - spike_t)/dt - 1)
self.assertLess(np.mean(np.abs(out_y - expected)), 1e-6)
####################
# TestHVCLikeLayer #
####################
class TestHVCLikeLayer(unittest.TestCase):
def test_jitter(self):
""" Test that there are differences in spiking between trials. """
# some reproducible arbitrariness
np.random.seed(343143)
n = 10
t_max = 25
dt = 0.1
G = HVCLikeLayer(n)
M1 = simulation.EventMonitor(G)
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
M2 = simulation.EventMonitor(G)
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(t_max)
self.assertNotEqual(M1.t, M2.t)
def test_no_jitter(self):
""" Test that repeated noiseless trials are identical. """
# some reproducible arbitrariness
np.random.seed(3249823)
n = 10
t_max = 25
dt = 0.1
G = HVCLikeLayer(n)
G.burst_noise = 0.0
G.spike_noise = 0.0
M1 = simulation.EventMonitor(G)
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
M2 = simulation.EventMonitor(G)
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(t_max)
self.assertEqual(M1.t, M2.t)
def test_uniform(self):
""" Test that there are spikes all along the simulation window. """
# some reproducible arbitrariness
np.random.seed(87548)
n = 50
t_max = 50
dt = 0.1
resolution = 1.0
class UniformityChecker(object):
def __init__(self, target, resolution):
self.target = target
self.resolution = resolution
self.order = 1
def prepare(self, t_max, dt):
self.has_spike = np.zeros(int_r(t_max/self.resolution) + 1)
def evolve(self, t, dt):
i = int_r(t/self.resolution)
self.has_spike[i] = (self.has_spike[i] or np.any(self.target.spike))
G = HVCLikeLayer(n)
M = UniformityChecker(G, resolution)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
self.assertTrue(np.all(M.has_spike))
def test_burst(self):
""" Test that each neuron fires a burst of given width and n_spikes. """
n = 25
t_max = 50
dt = 0.1
G = HVCLikeLayer(n)
G.burst_noise = 0.0
G.spike_noise = 0.0
M = simulation.EventMonitor(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
# split spikes by neuron index
spikes = [np.asarray(M.t)[np.asarray(M.i) == i] for i in xrange(n)]
        self.assertTrue(all(len(_) == G.spikes_per_burst for _ in spikes))
burst_lengths = [_[-1] - _[0] for _ in spikes]
self.assertLess(np.std(burst_lengths), dt/2)
self.assertLess(np.abs(np.mean(burst_lengths) - 1000*(G.spikes_per_burst-1)
/ G.rate_during_burst), dt/2)
def test_firing_rate_during_burst(self):
# some reproducible arbitrariness
np.random.seed(43245)
n = 25
t_max = 50
dt = 0.1
G = HVCLikeLayer(n)
M = simulation.EventMonitor(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
# split spikes by neuron index
spikes = [np.asarray(M.t)[np.asarray(M.i) == i] for i in xrange(n)]
# check that inter-spike intervals are in the correct range
isi = [np.diff(_) for _ in spikes]
isi_max = [np.max(_) for _ in isi]
        isi_min = [np.min(_) for _ in isi]
spike_dt = 1000.0/G.rate_during_burst
self.assertLess(np.max(isi_max), spike_dt + G.spike_noise + dt/2)
self.assertGreater(np.min(isi_min), spike_dt - G.spike_noise - dt/2)
def test_burst_dispersion(self):
""" Test that starting times of bursts are within required bounds. """
# some reproducible arbitrariness
np.random.seed(7342642)
n = 25
t_max = 50
dt = 0.1
n_sim = 10
G = HVCLikeLayer(n)
burst_starts = []
for i in xrange(n_sim):
M = simulation.EventMonitor(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
# split spikes by neuron index
spikes = [np.asarray(M.t)[np.asarray(M.i) == i] for i in xrange(n)]
burst_starts.append([_[0] for _ in spikes])
burst_starts_range = [np.ptp([_[i] for _ in burst_starts])
for i in xrange(n)]
self.assertLess(np.max(burst_starts_range), G.burst_noise + dt/2)
def test_burst_tmax(self):
""" Test using a different end time for bursts than for simulation. """
n = 25
t_max = 50
dt = 0.1
G = HVCLikeLayer(n)
G.burst_noise = 0.0
G.spike_noise = 0.0
M1 = simulation.EventMonitor(G)
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
G = HVCLikeLayer(n, burst_tmax=50)
G.burst_noise = 0.0
G.spike_noise = 0.0
M2 = simulation.EventMonitor(G)
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(2*t_max)
self.assertTrue(np.allclose(M1.t, M2.t))
self.assertTrue(np.allclose(M1.i, M2.i))
####################
# TestRandomLayer #
####################
class TestRandomLayer(unittest.TestCase):
def test_variability(self):
""" Test that output is variable. """
# some reproducible arbitrariness
np.random.seed(343143)
n = 10
t_max = 20.0
dt = 0.1
G = RandomLayer(n)
M1 = simulation.EventMonitor(G)
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
M2 = simulation.EventMonitor(G)
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(t_max)
self.assertNotEqual(len(M1.t), 0)
self.assertNotEqual(len(M2.t), 0)
self.assertNotEqual(M1.t, M2.t)
def test_init_out_with_rate(self):
""" Test that initial value of `out` is given by `rate`. """
n = 3
rates = [10, 50, 90]
G = RandomLayer(n, ini_rate=rates)
G.prepare(10.0, 0.1)
self.assertLess(np.max(np.abs(G.out - rates)), 1e-9)
####################
# TestStudentLayer #
####################
class TestStudentLayer(unittest.TestCase):
def test_dynamics_no_tau_ref(self):
""" Test dynamics with no refractory period. """
n = 50
t_max = 100.0
dt = 0.1
G = StudentLayer(n)
G.tau_ref = 0.0
i_values = np.linspace(0.01, 0.4, 50)
for i_ext in i_values:
# start with different initial voltages to take advantage of averaging
# effects
G.v_init = np.linspace(G.vR, G.v_th, n, endpoint=False)
G.i_ext_init = i_ext
M = simulation.EventMonitor(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
rate = float(len(M.t))/n/t_max*1000.0
# first source of uncertainty: a spike might not fit before the end of a
# simulation
uncertainty1 = 1.0/np.sqrt(n)/t_max*1000.0
expected = 0.0
uncertainty = uncertainty1
if G.R*i_ext > G.v_th - G.vR:
expected = 1000.0/(G.tau_m*np.log(G.R*i_ext/(G.vR-G.v_th+G.R*i_ext)))
# second source of uncertainty: spikes might move due to the granularity
# of the simulation
uncertainty2 = dt*expected*rate/1000.0
uncertainty = uncertainty1 + uncertainty2
uncertainty *= 1.5
self.assertLess(np.abs(rate - expected), uncertainty)
else:
self.assertAlmostEqual(rate, 0.0)
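    # The closed-form rate used above is the textbook noiseless LIF result,
    # stated here for reference (assuming tau_m is in ms, so the factor of
    # 1000 converts to Hz): starting from vR the membrane relaxes toward
    # vR + R*i_ext with time constant tau_m, so the time to threshold is
    # T = tau_m * ln(R*i_ext / (R*i_ext - (v_th - vR))) and the rate is
    # 1000 / T, provided R*i_ext > v_th - vR.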
def test_dynamics_with_tau_ref(self):
""" Test dynamics with refractory period. """
n = 10
t_max = 100.0
dt = 0.1
G = StudentLayer(n)
i_values = np.linspace(0.02, 0.4, 28)
different = 0
for i_ext in i_values:
# start with different initial voltages to take advantage of averaging
# effects
G.v_init = np.linspace(G.vR, G.v_th, n, endpoint=False)
G.i_ext_init = i_ext
M = simulation.EventMonitor(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
rate = float(len(M.t))/n/t_max*1000.0
# first source of uncertainty: a spike might not fit before the end of a
# simulation
uncertainty1 = 1.0/np.sqrt(n)/t_max*1000.0
expected0 = 0.0
expected = 0.0
if G.R*i_ext > G.v_th - G.vR:
expected0 = 1000.0/(G.tau_m*np.log(G.R*i_ext/(G.vR-G.v_th+G.R*i_ext)))
expected = expected0/(1 + expected0*G.tau_ref/1000.0)
# second source of uncertainty: spikes might move due to the granularity
# of the simulation
uncertainty2 = dt*expected*rate/1000.0
uncertainty = uncertainty1 + uncertainty2
self.assertLess(np.abs(rate - expected), uncertainty)
if np.abs(expected - expected0) >= uncertainty:
different += 1
else:
self.assertAlmostEqual(rate, 0.0)
# make sure that in most cases the firing rate using the refractory period
# was significantly different from the case without refractory period
self.assertGreater(different, len(i_values)*2/3)
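    # The refractory correction used above follows from adding tau_ref to
    # each inter-spike interval: if f0 is the rate without a refractory
    # period, the interval grows from 1/f0 to 1/f0 + tau_ref, giving
    # f = f0 / (1 + f0 * tau_ref), with tau_ref in ms (hence the /1000).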
def test_v_bounds(self):
""" Test that the membrane potential stays below threshold potential. """
n = 50
t_max = 100.0
dt = 0.1
G = StudentLayer(n)
G.i_ext_init = np.linspace(-1.0, 1.0, n)
class BoundsChecker(object):
def __init__(self, target):
self.target = target
self.small = None
self.large = None
self.order = 1
def evolve(self, t, dt):
small = np.min(self.target.v)
large = np.max(self.target.v)
if self.small is None or self.small > small:
self.small = small
if self.large is None or self.large < large:
self.large = large
M = BoundsChecker(G)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
self.assertLess(M.large, G.v_th)
def test_out(self):
""" Test generation of output field. """
t_max = 24.0
dt = 0.1
G = StudentLayer(1)
G.i_ext_init = 0.1
M = simulation.EventMonitor(G)
Mo = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, M, Mo, dt=dt)
sim.run(t_max)
# we need a single spike for this
# XXX we could also have set the refractory period to a really high number
self.assertEqual(len(M.t), 1)
t_spike = M.t[0]
mask = (Mo.t > t_spike)
out_t = Mo.t[mask]
out_y = Mo.out[0, mask]
expected = out_y[0]*np.power(1 - dt/G.tau_out, (out_t - t_spike)/dt)
self.assertLess(np.mean(np.abs(out_y - expected)), 1e-6)
##########################
# TestExcitatorySynapses #
##########################
class TestExcitatorySynapses(unittest.TestCase):
def setUp(self):
# generate pseudo-random test case
np.random.seed(123456)
self.t_max = 16.0 # duration of simulation
self.dt = 1.0 # time step
self.N = 15 # number of units in source layer
self.M = 30 # number of units in target layer
self.p = 0.2 # probability of spiking per time step
self.G = TableSpikers(self.N)
self.G.spike_table = (np.random.rand(int_r(self.t_max/self.dt), self.N) <
self.p)
# a simple target layer
class TargetNeurons(object):
def __init__(self, N, v_step=1.0):
self.N = N
self.v_step = v_step
self.active_state = True
def prepare(self, t_max, dt):
self.v = np.zeros(self.N)
self.i_ampa = np.zeros(self.N)
self.i_nmda = np.zeros(self.N)
self.active = np.repeat(self.active_state, self.N)
def evolve(self, t, dt):
self.v += self.v_step
self.Gp = TargetNeurons(self.N, np.inf)
self.T = TargetNeurons(self.M, np.inf)
self.syn_1t1 = ExcitatorySynapses(self.G, self.Gp)
self.syn_dense = ExcitatorySynapses(self.G, self.T)
def test_one_to_one_mismatch(self):
""" Test exception for 1-to-1 synapses b/w layers of different sizes. """
self.assertRaises(Exception, ExcitatorySynapses, self.G, self.T,
one_to_one=True)
def test_one_to_one_transmission_ampa(self):
self.syn_1t1.W = np.linspace(0.1, 2.0, self.N)
sim = simulation.Simulation(self.G, self.Gp, self.syn_1t1, dt=self.dt)
sim.run(self.t_max)
expected = np.zeros(self.N)
for i in xrange(len(self.G.spike_table)):
expected += self.syn_1t1.W*self.G.spike_table[i]
self.assertTrue(np.allclose(expected, self.Gp.i_ampa))
self.assertAlmostEqual(np.linalg.norm(self.Gp.i_nmda), 0.0)
def test_one_to_one_transmission_nmda(self):
self.syn_1t1.W = np.asarray([_ % 2 for _ in xrange(self.N)])
self.syn_1t1.f_nmda = 1.0
sim = simulation.Simulation(self.G, self.Gp, self.syn_1t1, dt=self.dt)
sim.run(self.t_max)
self.assertAlmostEqual(np.linalg.norm(self.Gp.i_ampa), 0.0)
self.assertAlmostEqual(np.linalg.norm(self.Gp.i_nmda[::2].ravel()), 0.0)
expected = np.zeros(self.N)
v = np.zeros(self.N)
for i in xrange(len(self.G.spike_table)):
v += self.Gp.v_step
expected[1::2] += self.G.spike_table[i, 1::2]/(1.0 + self.syn_1t1.mg/3.57*
np.exp(-v[1::2]/16.13))
self.assertTrue(np.allclose(expected, self.Gp.i_nmda))
    def test_dense_transmission(self):
        """ Test transmission with dense synapses. """
# generate pseudo-random test case
f = 0.5
np.random.seed(6564)
self.syn_dense.W = np.random.randn(self.M, self.N)
self.syn_dense.f_nmda = f
sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)
sim.run(self.t_max)
expected_ampa = np.zeros(self.M)
expected_nmda = np.zeros(self.M)
v = np.zeros(self.M)
for i in xrange(len(self.G.spike_table)):
v += self.T.v_step
effect = np.dot(self.syn_dense.W, self.G.spike_table[i])
expected_ampa += effect
expected_nmda += effect/(1.0 + self.syn_1t1.mg/3.57*
np.exp(-v/16.13))
self.assertTrue(np.allclose((1-f)*expected_ampa, self.T.i_ampa))
self.assertTrue(np.allclose(f*expected_nmda, self.T.i_nmda))
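    # The voltage-dependent factor 1/(1 + mg/3.57 * exp(-v/16.13)) used in
    # the NMDA expectations above appears to be the standard Jahr-Stevens
    # magnesium-block term (an assumption based on the constants 3.57 mM and
    # 16.13 mV); the tests only require that ExcitatorySynapses reproduces
    # the same expression.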
def test_no_effect_during_refractory(self):
""" Check that there is no effect during refractory periods. """
np.random.seed(6564)
f = 0.5
self.syn_dense.W = np.random.randn(self.M, self.N)
self.syn_dense.f_nmda = f
self.syn_dense.change_during_ref = False
self.T.active_state = False
sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)
sim.run(self.t_max)
self.assertAlmostEqual(np.linalg.norm(self.T.i_ampa), 0.0)
self.assertAlmostEqual(np.linalg.norm(self.T.i_nmda), 0.0)
def test_allow_effect_during_refractory(self):
""" Check that it's possible to have effect during refractory periods. """
np.random.seed(6564)
f = 0.5
self.syn_dense.W = np.random.randn(self.M, self.N)
self.syn_dense.f_nmda = f
self.syn_dense.change_during_ref = True
self.T.active_state = False
sim = simulation.Simulation(self.G, self.T, self.syn_dense, dt=self.dt)
sim.run(self.t_max)
self.assertGreater(np.linalg.norm(self.T.i_ampa), 0.1)
self.assertGreater(np.linalg.norm(self.T.i_nmda), 0.1)
def test_init_i_ampa(self):
self.syn_dense.W = np.ones((self.M, self.N))
self.syn_1t1.W = np.ones(self.N)
self.G.avg_rates = 1.0
self.Gp.tau_ampa = 5.0
self.T.tau_ampa = 5.0
sim = simulation.Simulation(self.G, self.Gp, self.T,
self.syn_dense, self.syn_1t1,
dt=self.dt)
sim.run(0)
self.assertGreater(np.linalg.norm(self.T.i_ampa), 1e-3)
self.assertGreater(np.linalg.norm(self.Gp.i_ampa), 1e-3)
def test_init_i_nmda(self):
self.syn_dense.W = np.ones((self.M, self.N))
self.syn_1t1.W = np.ones(self.N)
self.syn_dense.f_nmda = 1.0
self.syn_1t1.f_nmda = 1.0
self.G.avg_rates = 1.0
self.Gp.tau_nmda = 100.0
self.T.tau_nmda = 100.0
sim = simulation.Simulation(self.G, self.Gp, self.T,
self.syn_dense, self.syn_1t1,
dt=self.dt)
sim.run(0)
self.assertGreater(np.linalg.norm(self.T.i_nmda), 1e-3)
self.assertGreater(np.linalg.norm(self.Gp.i_nmda), 1e-3)
########################
# TestLinearController #
########################
class TestLinearController(unittest.TestCase):
def setUp(self):
self.dt = 1.0 # time step
self.N = 24 # number of units in source layer
out_step = 0.1 # amount by which `out` grows at each step
self.G = SimpleNeurons(self.N)
def test_zero(self):
""" Test controller with vanishing weights. """
controller = LinearController(self.G, 2, mode='zero')
sim = simulation.Simulation(self.G, controller, dt=self.dt)
sim.run(self.dt)
self.assertAlmostEqual(np.linalg.norm(controller.W.ravel()), 0.0)
self.assertAlmostEqual(np.linalg.norm(controller.out), 0.0)
def test_sum(self):
""" Test additive controller. """
controller = LinearController(self.G, 2, mode='sum')
self.G.out_fct = lambda _: np.hstack(((self.N/2)*[1], (self.N/2)*[-1]))
sim = simulation.Simulation(self.G, controller, dt=self.dt)
sim.run(self.dt)
value = 1.0*self.dt/controller.tau
self.assertTrue(np.allclose(controller.out, [value, -value]))
def test_push_pull(self):
""" Test push/pull controller. """
controller = LinearController(self.G, 3, mode='pushpull')
self.G.out_fct = lambda _: np.hstack((
(self.N/6)*[1],
(self.N/6)*[-1],
(self.N/6)*[1],
(self.N/6)*[1],
(self.N/6)*[0],
(self.N/6)*[0],
))
sim = simulation.Simulation(self.G, controller, dt=self.dt)
sim.run(self.dt)
value = 1.0/2*self.dt/controller.tau
self.assertTrue(np.allclose(controller.out, [2*value, 0, 0]))
def test_bias_initial(self):
""" Test that initial values start at bias. """
biases = [1, -1]
controller = LinearController(self.G, 2, mode='zero')
controller.bias = biases
sim = simulation.Simulation(self.G, controller, dt=self.dt)
sim.run(0)
self.assertTrue(np.allclose(controller.out, biases))
def test_bias(self):
""" Test controller bias. """
biases = [1, -0.5, 0.5, 1.5]
controller = LinearController(self.G, 4, mode='zero')
controller.bias = biases
sim = simulation.Simulation(self.G, controller, dt=self.dt)
sim.run(self.dt)
self.assertTrue(np.allclose(controller.out, biases))
def test_timescale(self):
""" Test smoothing timescale. """
tau = 25.0
tmax = 50.0
controller = LinearController(self.G, 1, mode='sum', tau=tau)
self.G.out_fct = lambda _: np.ones(self.N)
sim = simulation.Simulation(self.G, controller, dt=self.dt)
sim.run(tmax)
expected = 1.0 - (1.0 - self.dt/tau)**int_r(tmax/self.dt)
self.assertTrue(np.allclose(controller.out, expected))
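    # The expected value above is consistent with a discrete first-order
    # low-pass update of the form out += (dt/tau)*(input - out) (an
    # assumption about LinearController's smoothing): with out(0) = 0 and a
    # constant unit input, 1 - out shrinks by a factor (1 - dt/tau) each
    # step, so after n = tmax/dt steps out = 1 - (1 - dt/tau)**n.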
def test_no_smoothing(self):
""" Test the controller without smoothing. """
# reproducible arbitrariness
np.random.seed(12321)
nsteps = 10
tmax = nsteps*self.dt
sequence = np.random.randn(nsteps)
controller = LinearController(self.G, 1, mode='sum', tau=None)
M = simulation.StateMonitor(controller, 'out')
self.G.out_fct = lambda i: sequence[i]*np.ones(self.N)
sim = simulation.Simulation(self.G, controller, M, dt=self.dt)
sim.run(tmax)
for i in xrange(nsteps):
self.assertTrue(np.allclose(M.out[:, i], sequence[i]))
def test_source_error(self):
""" Test calculation of motor error mapped to source neurons. """
# reproducible arbitrariness
np.random.seed(12321)
nsteps = 10
nchan = 3
tmax = nsteps*self.dt
sequence = np.random.randn(nsteps, self.N)
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.W = np.random.randn(*controller.W.shape)
self.G.out_fct = lambda i: sequence[i]
class SourceErrorGrabber(object):
def __init__(self, target):
self.target = target
self.order = 10
def prepare(self, tmax, dt):
nsteps = int_r(tmax/dt)
self.motor_error = np.zeros((nsteps, self.target.source.N))
def evolve(self, t, dt):
i = int_r(t/dt)
self.motor_error[i, :] = self.target.get_source_error()
M = SourceErrorGrabber(controller)
M1 = simulation.StateMonitor(controller, 'out')
sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)
sim.run(tmax)
for i in xrange(int_r(tmax/self.dt)):
diff = M1.out[:, i] - target[:, i]
self.assertTrue(np.allclose(M.motor_error[i],
np.dot(diff, controller.W)))
def test_motor_error(self):
""" Test calculation of motor error. """
# reproducible arbitrariness
np.random.seed(12325)
nsteps = 10
nchan = 3
tmax = nsteps*self.dt
sequence = np.random.randn(nsteps, self.N)
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.W = np.random.randn(*controller.W.shape)
self.G.out_fct = lambda i: sequence[i]
class MotorErrorGrabber(object):
def __init__(self, target):
self.target = target
self.order = 10
def prepare(self, tmax, dt):
nsteps = int_r(tmax/dt)
self.motor_error = np.zeros((nsteps, self.target.N))
def evolve(self, t, dt):
i = int_r(t/dt)
self.motor_error[i, :] = self.target.get_motor_error()
M = MotorErrorGrabber(controller)
M1 = simulation.StateMonitor(controller, 'out')
sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)
sim.run(tmax)
for i in xrange(int_r(tmax/self.dt)):
diff = M1.out[:, i] - target[:, i]
self.assertTrue(np.allclose(M.motor_error[i], diff))
def test_permute_inverse(self):
""" Test that `permute_inverse` works. """
# reproducible arbitrariness
np.random.seed(12321)
nsteps = 20
nchan = 3
tmax = nsteps*self.dt
sequence = np.random.randn(nsteps, self.N)
permutation = np.arange(self.N)
n1a = 3
n1b = 5
n2a = 13
n2b = 4
permutation[n1a], permutation[n1b] = (permutation[n1b], permutation[n1a])
permutation[n2a], permutation[n2b] = (permutation[n2b], permutation[n2a])
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.W = np.random.randn(*controller.W.shape)
self.G.out_fct = lambda i: sequence[i]
class SourceErrorGrabber(object):
def __init__(self, target):
self.target = target
self.order = 10
def prepare(self, tmax, dt):
nsteps = int_r(tmax/dt)
self.motor_error = np.zeros((nsteps, self.target.source.N))
def evolve(self, t, dt):
i = int_r(t/dt)
self.motor_error[i, :] = self.target.get_source_error()
ME1 = SourceErrorGrabber(controller)
sim1 = simulation.Simulation(self.G, controller, ME1, dt=self.dt)
sim1.run(tmax)
controller.permute_inverse = permutation
ME2 = SourceErrorGrabber(controller)
sim2 = simulation.Simulation(self.G, controller, ME2, dt=self.dt)
sim2.run(tmax)
# test that the correct source error outputs have been swapped
expected = np.copy(ME1.motor_error)
expected[:, [n1a, n1b]] = expected[:, [n1b, n1a]]
expected[:, [n2a, n2b]] = expected[:, [n2b, n2a]]
self.assertAlmostEqual(np.mean(np.abs(expected - ME2.motor_error)), 0.0)
def test_random_permute_inverse_fraction(self):
""" Test random permutation shuffles correct fraction of neurons. """
# reproducible arbitrariness
np.random.seed(12325)
nchan = 3
nsteps = 20
rho = 1.0/4
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.set_random_permute_inverse(rho)
self.assertIsNotNone(controller.permute_inverse)
# check that the right fraction of assignments are kept intact
self.assertEqual(np.sum(controller.permute_inverse == np.arange(self.N)),
(1.0 - rho)*self.N)
def test_random_permute_inverse_changes_group(self):
""" Test random permutation moves affected neurons to different groups. """
# reproducible arbitrariness
np.random.seed(232)
nchan = 3
nsteps = 20
rho = 1.0/4
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.set_random_permute_inverse(rho)
self.assertIsNotNone(controller.permute_inverse)
n_per_group = self.N/nchan
groups0 = np.arange(self.N)/n_per_group
groups1 = controller.permute_inverse/n_per_group
# check that the right fraction of assignments are kept intact
self.assertEqual(np.sum(groups0 != groups1), rho*self.N)
    def test_random_permute_inverse_is_random(self):
        """ Test that the random permutation changes between trials. """
# reproducible arbitrariness
np.random.seed(2325)
nchan = 3
nsteps = 20
rho = 1.0/4
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.set_random_permute_inverse(rho)
self.assertIsNotNone(controller.permute_inverse)
perm1 = np.copy(controller.permute_inverse)
controller.set_random_permute_inverse(rho)
perm2 = controller.permute_inverse
self.assertNotEqual(np.sum(perm1 == perm2), self.N)
    def test_random_permute_inverse_subdivide(self):
        """ Test `subdivide_by` option for random permutation. """
# reproducible arbitrariness
np.random.seed(121)
nchan = 3
nsteps = 20
rho = 1.0/2
subdiv = 2
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.set_random_permute_inverse(rho, subdivide_by=subdiv)
self.assertIsNotNone(controller.permute_inverse)
n_per_group = self.N/nchan
groups0 = np.arange(self.N)/n_per_group
groups1 = controller.permute_inverse/n_per_group
n_per_subgroup = self.N/(subdiv*nchan)
subgroups0 = np.arange(self.N)/n_per_subgroup
subgroups1 = controller.permute_inverse/n_per_subgroup
# check that the right fraction of assignments are kept intact
self.assertEqual(np.sum(subgroups0 != subgroups1), rho*self.N)
# but that some of the mismatches end up *within the same group*
# (though they come from different subgroups)
self.assertNotEqual(np.sum(groups0 != groups1), rho*self.N)
def test_error_map_fct(self):
""" Test mapping of the source error through a nonlinearity. """
# reproducible arbitrariness
np.random.seed(2343)
nsteps = 12
nchan = 4
tmax = nsteps*self.dt
sequence = np.random.randn(nsteps, self.N)
target = np.random.randn(nchan, nsteps)
controller = LinearController(self.G, target, tau=None)
controller.W = np.random.randn(*controller.W.shape)
controller.error_map_fct = lambda err: np.tanh(err)
self.G.out_fct = lambda i: sequence[i]
class SourceErrorGrabber(object):
def __init__(self, target):
self.target = target
self.order = 10
def prepare(self, tmax, dt):
nsteps = int_r(tmax/dt)
self.motor_error = np.zeros((nsteps, self.target.source.N))
def evolve(self, t, dt):
i = int_r(t/dt)
self.motor_error[i, :] = self.target.get_source_error()
M = SourceErrorGrabber(controller)
M1 = simulation.StateMonitor(controller, 'out')
sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)
sim.run(tmax)
for i in xrange(int_r(tmax/self.dt)):
diff = M1.out[:, i] - target[:, i]
self.assertTrue(np.allclose(M.motor_error[i],
np.dot(controller.error_map_fct(diff), controller.W)))
def test_nonlinearity(self):
""" Test linear-nonlinear model. """
# reproducible arbitrariness
np.random.seed(1232321)
nsteps = 10
tmax = nsteps*self.dt
sequence = np.random.randn(nsteps)
controller = LinearController(self.G, 3, mode='sum', tau=20.0)
M1 = simulation.StateMonitor(controller, 'out')
self.G.out_fct = lambda i: sequence[i]*np.ones(self.N)
sim1 = simulation.Simulation(self.G, controller, M1, dt=self.dt)
sim1.run(tmax)
controller.nonlinearity = lambda v: v**2 - v
M2 = simulation.StateMonitor(controller, 'out')
sim2 = simulation.Simulation(self.G, controller, M2, dt=self.dt)
sim2.run(tmax)
self.assertLess(np.max(np.abs(M2.out - controller.nonlinearity(M1.out))),
1e-9)
#################################
# TestTwoExponentialsPlasticity #
#################################
# the tests themselves
class TestTwoExponentialsPlasticity(unittest.TestCase):
def setUp(self):
# generate pseudo-random test case
self.dt = 1.0 # time step
self.Nc = 15 # number of units in conductor layer
self.Ns = 30 # number of units in student layer
# a do-nothing layer
class MockNeurons(object):
def __init__(self, N):
self.N = N
def prepare(self, t_max, dt):
self.v = np.zeros(self.N)
self.i_ampa = np.zeros(self.N)
self.i_nmda = np.zeros(self.N)
def evolve(self, t, dt):
pass
class SimpleSynapses(object):
def __init__(self, source, target):
self.source = source
self.target = target
self.W = np.zeros((self.target.N, self.source.N))
self.order = 1
def evolve(self, t, dt):
self.target.out = np.dot(self.W, self.source.out)
self.conductor = SimpleNeurons(self.Nc)
self.student = MockNeurons(self.Ns)
self.tutor = SimpleNeurons(self.Ns)
# reproducible arbitrariness
np.random.seed(3231)
self.syns = SimpleSynapses(self.conductor, self.student)
self.syns.W = np.random.rand(*self.syns.W.shape)
self.rule = TwoExponentialsPlasticity(self.syns, self.tutor,
constrain_positive=False,
                                              rate=1e-6)
def test_linear_in_cond(self):
""" Test that weight change is linear in conductor output. """
# reproducible arbitrariness
np.random.seed(3232)
cond_out = np.random.randn(self.Nc)
alpha = 2.3
self.conductor.out_step = np.copy(cond_out)
self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.conductor.out_step = alpha*cond_out
sim.run(self.dt)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, alpha*change1))
def test_linear_in_tut(self):
""" Test that weight change is linear in tutor output. """
# reproducible arbitrariness
np.random.seed(5000)
tut_out = np.random.randn(self.Ns)
alpha = 0.7
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_fct = lambda _: self.rule.theta + tut_out
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.tutor.out_fct = lambda _: self.rule.theta + alpha*tut_out
sim.run(self.dt)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, alpha*change1))
def test_linear_in_rate(self):
""" Test that weight change is linear in learning rate. """
# reproducible arbitrariness
np.random.seed(4901)
alpha = 1.2
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.rule.rate *= alpha
sim.run(self.dt)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, alpha*change1))
def test_constrain_positive(self):
""" Test that we can force the weights to stay positive. """
# first run without constraints and make sure some weights become negative
        # NB: need to divide by 2 because the tutor's (SimpleNeurons) `evolve` gets called
# before the plasticity rule's `evolve`, and so the tutor output becomes
# *twice* `out_step`
self.tutor.out_step = self.rule.theta/2 + np.hstack(( np.ones(self.Ns/2),
-np.ones(self.Ns/2)))
self.conductor.out_step = np.ones(self.Nc)
self.syns.W = np.zeros(self.syns.W.shape)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
self.assertGreater(np.sum(self.syns.W < 0), 0)
# next run with the constraint and check that everything stays positive
self.rule.constrain_positive = True
self.syns.W = np.zeros(self.syns.W.shape)
sim.run(self.dt)
self.assertEqual(np.sum(self.syns.W < 0), 0)
def test_prop_alpha(self):
""" Test that synaptic change is linear in `alpha`. """
# reproducible arbitrariness
np.random.seed(5001)
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
self.rule.alpha = 1.0
self.rule.beta = 0.0
tmax = 5*self.dt
factor = 1.3
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(tmax)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.rule.alpha *= factor
sim.run(tmax)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, factor*change1))
def test_prop_beta(self):
""" Test that synaptic change is linear in `beta`. """
# reproducible arbitrariness
np.random.seed(1321)
self.rule.alpha = 0
self.rule.beta = 0.5
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
factor = 1.5
tmax = 7*self.dt
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(tmax)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.rule.beta *= factor
sim.run(tmax)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, factor*change1))
def test_additive_alpha_beta(self):
""" Test that alpha and beta components are additive. """
np.random.seed(912838)
param_pairs = [(1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
tmax = 4*self.dt
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
W0 = np.copy(self.syns.W)
changes = []
for params in param_pairs:
self.rule.alpha = params[0]
self.rule.beta = params[1]
self.syns.W = np.copy(W0)
sim.run(tmax)
changes.append(self.syns.W - W0)
self.assertTrue(np.allclose(changes[-1], changes[0] + changes[1]))
def test_timescales(self):
""" Test the timescales for alpha and beta components. """
np.random.seed(2312321)
param_pairs = [(1, 0, self.rule.tau1), (0, 1, self.rule.tau2)]
nsteps = 10
self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \
else np.zeros(self.Nc)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
W0 = np.copy(self.syns.W)
for params in param_pairs:
self.rule.alpha = params[0]
self.rule.beta = params[1]
tau = params[2]
self.tutor.out_fct = lambda i: (self.rule.theta + (10 if i == 0 else 0))*\
np.ones(self.Ns)
self.syns.W = np.copy(W0)
sim.run(self.dt)
change0 = self.syns.W - W0
self.assertGreater(np.linalg.norm(change0), 1e-10)
self.tutor.out_fct = lambda i: (self.rule.theta + (10
if i == nsteps-1 else 0))*np.ones(self.Ns)
self.syns.W = np.copy(W0)
sim.run(nsteps*self.dt)
change1 = self.syns.W - W0
change1_exp = change0*(1 - float(self.dt)/tau)**(nsteps-1)
self.assertTrue(np.allclose(change1, change1_exp),
msg="Timescale not verified, alpha={}, beta={}.".format(*params[:2]))
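    # Interpretation of the expectation above (inferred from the test
    # structure, not from the plasticity implementation itself): the
    # conductor burst at t = 0 leaves a trace that decays by (1 - dt/tau)
    # per step, so a tutor signal arriving (nsteps-1) steps later produces a
    # weight change scaled by (1 - dt/tau)**(nsteps-1) relative to a
    # coincident tutor signal.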
def test_tuple_synapses(self):
""" Test using a tuple instead of synapses object. """
# reproducible arbitrariness
np.random.seed(5003)
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
self.rule.alpha = 1.0
self.rule.beta = 1.5
tmax = 10*self.dt
W0 = np.copy(self.syns.W)
sim1 = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim1.run(tmax)
final1 = np.copy(self.syns.W)
self.syns.W = np.copy(W0)
rule2 = TwoExponentialsPlasticity(
(self.syns.source, self.syns.target, self.syns.W),
            self.tutor, constrain_positive=False, rate=1e-6)
rule2.alpha = 1.0
rule2.beta = 1.5
sim2 = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, rule2, dt=self.dt)
sim2.run(tmax)
final2 = np.copy(self.syns.W)
self.assertTrue(np.allclose(final1, final2))
##################################
# TestSuperExponentialPlasticity #
##################################
class TestSuperExponentialPlasticity(unittest.TestCase):
def setUp(self):
# generate pseudo-random test case
self.dt = 1.0 # time step
self.Nc = 15 # number of units in conductor layer
self.Ns = 30 # number of units in student layer
# a do-nothing layer
class MockNeurons(object):
def __init__(self, N):
self.N = N
def prepare(self, t_max, dt):
self.v = np.zeros(self.N)
self.i_ampa = np.zeros(self.N)
self.i_nmda = np.zeros(self.N)
def evolve(self, t, dt):
pass
class SimpleSynapses(object):
def __init__(self, source, target):
self.source = source
self.target = target
self.W = np.zeros((self.target.N, self.source.N))
self.order = 1
def evolve(self, t, dt):
self.target.out = np.dot(self.W, self.source.out)
self.conductor = SimpleNeurons(self.Nc)
self.student = MockNeurons(self.Ns)
self.tutor = SimpleNeurons(self.Ns)
# reproducible arbitrariness
np.random.seed(3231)
self.syns = SimpleSynapses(self.conductor, self.student)
self.syns.W = np.random.rand(*self.syns.W.shape)
self.rule = SuperExponentialPlasticity(self.syns, self.tutor,
constrain_positive=False,
                                              rate=1e-6)
def test_linear_in_cond(self):
""" Test that weight change is linear in conductor output. """
# reproducible arbitrariness
np.random.seed(3232)
cond_out = np.random.randn(self.Nc)
alpha = 2.3
self.conductor.out_step = np.copy(cond_out)
self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.conductor.out_step = alpha*cond_out
sim.run(self.dt)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, alpha*change1))
def test_linear_in_tut(self):
""" Test that weight change is linear in tutor output. """
# reproducible arbitrariness
np.random.seed(5000)
tut_out = np.random.randn(self.Ns)
alpha = 0.7
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_fct = lambda _: self.rule.theta + tut_out
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.tutor.out_fct = lambda _: self.rule.theta + alpha*tut_out
sim.run(self.dt)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, alpha*change1))
def test_linear_in_rate(self):
""" Test that weight change is linear in learning rate. """
# reproducible arbitrariness
np.random.seed(4901)
alpha = 1.2
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.rule.rate *= alpha
sim.run(self.dt)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, alpha*change1))
def test_constrain_positive(self):
""" Test that we can force the weights to stay positive. """
# first run without constraints and make sure some weights become negative
        # NB: need to divide by 2 because the tutor's (SimpleNeurons) `evolve` gets called
# before the plasticity rule's `evolve`, and so the tutor output becomes
# *twice* `out_step`
self.tutor.out_step = self.rule.theta/2 + np.hstack(( np.ones(self.Ns/2),
-np.ones(self.Ns/2)))
self.conductor.out_step = np.ones(self.Nc)
self.syns.W = np.zeros(self.syns.W.shape)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(self.dt)
self.assertGreater(np.sum(self.syns.W < 0), 0)
# next run with the constraint and check that everything stays positive
self.rule.constrain_positive = True
self.syns.W = np.zeros(self.syns.W.shape)
sim.run(self.dt)
self.assertEqual(np.sum(self.syns.W < 0), 0)
def test_prop_alpha(self):
""" Test that synaptic change is linear in `alpha`. """
# reproducible arbitrariness
np.random.seed(5001)
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
self.rule.alpha = 1.0
self.rule.beta = 0.0
tmax = 5*self.dt
factor = 1.3
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(tmax)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.rule.alpha *= factor
sim.run(tmax)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, factor*change1))
def test_prop_beta(self):
""" Test that synaptic change is linear in `beta`. """
# reproducible arbitrariness
np.random.seed(1321)
self.rule.alpha = 0
self.rule.beta = 0.5
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
factor = 1.5
tmax = 7*self.dt
W0 = np.copy(self.syns.W)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim.run(tmax)
change1 = self.syns.W - W0
self.syns.W = np.copy(W0)
self.rule.beta *= factor
sim.run(tmax)
change2 = self.syns.W - W0
self.assertTrue(np.allclose(change2, factor*change1))
def test_additive_alpha_beta(self):
""" Test that alpha and beta components are additive. """
np.random.seed(912838)
param_pairs = [(1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
tmax = 4*self.dt
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
W0 = np.copy(self.syns.W)
changes = []
for params in param_pairs:
self.rule.alpha = params[0]
self.rule.beta = params[1]
self.syns.W = np.copy(W0)
sim.run(tmax)
changes.append(self.syns.W - W0)
self.assertTrue(np.allclose(changes[-1], changes[0] + changes[1]))
def test_timescale_beta(self):
""" Test the timescale for beta component. """
param_pairs = [(0, 1, self.rule.tau2)]
nsteps = 10
self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \
else np.zeros(self.Nc)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
W0 = np.copy(self.syns.W)
for params in param_pairs:
self.rule.alpha = params[0]
self.rule.beta = params[1]
tau = params[2]
self.tutor.out_fct = lambda i: (self.rule.theta + (10 if i == 0 else 0))*\
np.ones(self.Ns)
self.syns.W = np.copy(W0)
sim.run(self.dt)
change0 = self.syns.W - W0
self.assertGreater(np.linalg.norm(change0), 1e-10)
self.tutor.out_fct = lambda i: (self.rule.theta + (10
if i == nsteps-1 else 0))*np.ones(self.Ns)
self.syns.W = np.copy(W0)
sim.run(nsteps*self.dt)
change1 = self.syns.W - W0
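# Each Euler step should shrink the beta eligibility trace by a factor of
# (1 - dt/tau) -- the discrete analogue of exp(-dt/tau) -- so delaying the
# tutor pulse by (nsteps-1) steps is expected to scale the weight change by
# (1 - dt/tau)**(nsteps-1), which is what change1_exp computes below.
# (Descriptive comment; interpretation inferred from the formula itself.)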
change1_exp = change0*(1 - float(self.dt)/tau)**(nsteps-1)
self.assertTrue(np.allclose(change1, change1_exp),
msg="Timescale not verified, alpha={}, beta={}.".format(*params[:2]))
def test_super_exponential(self):
""" Test alpha component goes like t*e^{-t}. """
nsteps = 100
self.dt = 0.1
self.conductor.out_fct = lambda i: 10*np.ones(self.Nc) if i == 0 \
else np.zeros(self.Nc)
sim = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
W0 = np.copy(self.syns.W)
self.rule.alpha = 1
self.rule.beta = 0
tau = self.rule.tau1
j1 = nsteps/3
j2 = nsteps
self.tutor.out_fct = lambda i: (self.rule.theta +
(10 if i == j1-1 else 0))*np.ones(self.Ns)
delta1 = j1*self.dt
self.syns.W = np.copy(W0)
sim.run(delta1)
change1 = self.syns.W - W0
self.assertGreater(np.linalg.norm(change1), 1e-10)
self.tutor.out_fct = lambda i: (self.rule.theta +
(10 if i == j2-1 else 0))*np.ones(self.Ns)
delta2 = j2*self.dt
self.syns.W = np.copy(W0)
sim.run(delta2)
change2 = self.syns.W - W0
self.assertGreater(np.linalg.norm(change2), 1e-10)
ratio = change1/change2
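# Assuming the alpha eligibility kernel is proportional to t*exp(-t/tau1), a
# tutor pulse delayed by Delta produces a change proportional to
# Delta*exp(-Delta/tau), so the ratio of the two runs should be
# (delta1/delta2)*exp(-(delta1 - delta2)/tau), as computed below.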
ratio_exp = ((delta1/delta2)*(np.exp(-(delta1 - delta2)/tau))
*np.ones(np.shape(ratio)))
self.assertLess(np.max(np.abs(ratio - ratio_exp)/ratio), 0.05)
def test_tuple_synapses(self):
""" Test using a tuple instead of synapses object. """
# reproducible arbitrariness
np.random.seed(5003)
self.conductor.out_step = np.random.randn(self.Nc)
self.tutor.out_step = np.random.randn(self.Ns)
self.rule.alpha = 1.0
self.rule.beta = 1.5
tmax = 10*self.dt
W0 = np.copy(self.syns.W)
sim1 = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, self.rule, dt=self.dt)
sim1.run(tmax)
final1 = np.copy(self.syns.W)
self.syns.W = np.copy(W0)
rule2 = SuperExponentialPlasticity(
(self.syns.source, self.syns.target, self.syns.W),
self.tutor, constrain_positive=False, rate=1e-6)
rule2.alpha = 1.0
rule2.beta = 1.5
sim2 = simulation.Simulation(self.conductor, self.student, self.tutor,
self.syns, rule2, dt=self.dt)
sim2.run(tmax)
final2 = np.copy(self.syns.W)
self.assertTrue(np.allclose(final1, final2))
#########################
# TestBlackboxTutorRule #
#########################
class TestBlackboxTutorRule(unittest.TestCase):
def setUp(self):
self.Nsrc = 12 # number of source neurons
self.Nout = 3 # number of output channels
class MockSource(object):
def __init__(self, N):
self.N = N
def evolve(self, t, dt):
pass
class MockController(object):
def __init__(self, N, source, error_fct):
self.N = N
self.source = source
self.error_fct = error_fct
self.order = 2
def prepare(self, tmax, dt):
self._last_error = self.error_fct(0)
def evolve(self, t, dt):
self._last_error = self.error_fct(t)
def get_source_error(self):
return self._last_error
self.source = MockSource(self.Nsrc)
self.motor = MockController(self.Nout, self.source, lambda _: 0)
self.rule = BlackboxTutorRule(self.motor, gain=1)
def test_memory(self):
""" Test the timescale of the integration. """
tau = 53.0
tau0 = 22.0
mrate = 50.0
Mrate = 100.0
tmax = 100.0
dt = 0.01
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
ndiv3 = self.Nsrc/3
self.motor.error_fct = lambda t: np.hstack((
np.cos(t/tau0)*np.ones(ndiv3), np.sin(t/tau0)*np.ones(ndiv3),
np.ones(ndiv3)))
M = simulation.StateMonitor(self.rule, 'out')
sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)
sim.run(tmax)
# tutor output points *opposite* the motor error!
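# The expected traces below are the closed forms (with gain = 1) of
#   -(1/tau) * integral_0^t exp(-s/tau) * eps(s) ds
# evaluated for eps(s) = cos(s/tau0), sin(s/tau0) and 1, respectively, and then
# mapped into the [min_rate, max_rate] band via mavg and mdiff further down.
# (Descriptive comment inferred from the formulas themselves.)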
prefactor = -self.rule.gain*tau0/(tau*tau + tau0*tau0)
integral_part1 = np.cos(M.t/tau0)*np.exp(-M.t/tau)
integral_part2 = np.sin(M.t/tau0)*np.exp(-M.t/tau)
expected_cos = prefactor*(tau0 - tau0*integral_part1 + tau*integral_part2)
expected_sin = prefactor*(tau - tau*integral_part1 - tau0*integral_part2)
expected_const = -(1 - np.exp(-M.t/tau))
mavg = (mrate + Mrate)*0.5
mdiff = (Mrate - mrate)*0.5
expected = np.vstack((
np.tile(mavg + mdiff*expected_cos, (ndiv3, 1)),
np.tile(mavg + mdiff*expected_sin, (ndiv3, 1)),
np.tile(mavg + mdiff*expected_const, (ndiv3, 1))
))
# mismatch is relatively large since we're using Euler's method
# we can't do much better, however, since the motor controller cannot give
# us motor error information at sub-step resolution
mismatch = np.mean(np.abs(expected - M.out)/expected)
self.assertLess(mismatch, 0.05)
def test_no_memory(self):
""" Test instantaneous response. """
tau0 = 23.0
mrate = 50.0
Mrate = 100.0
tmax = 100.0
dt = 0.2
self.rule.tau = 0
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
ndiv3 = self.Nsrc/3
self.motor.error_fct = lambda t: np.hstack((
np.cos(t/tau0)*np.ones(ndiv3), np.sin(t/tau0)*np.ones(ndiv3),
np.ones(ndiv3)))
M = simulation.StateMonitor(self.rule, 'out')
sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)
sim.run(tmax)
mavg = (mrate + Mrate)*0.5
mdiff = (Mrate - mrate)*0.5
# tutor output points *opposite* the motor error!
expected = mavg - mdiff*np.vstack((
np.tile(np.cos(M.t/tau0), (ndiv3, 1)),
np.tile(np.sin(M.t/tau0), (ndiv3, 1)),
np.ones((ndiv3, len(M.t)))))
mismatch = np.mean(np.abs(M.out - expected))
self.assertAlmostEqual(mismatch, 0)
def test_gain(self):
""" Test gain controls. """
tau = 50.0
mrate = 50.0
Mrate = 100.0
gain = 5
tmax = 50.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.gain = 1
self.motor.error_fct = lambda _: np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.gain = gain
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
mavg = (mrate + Mrate)*0.5
out1 = M1.out - mavg
out2 = M2.out - mavg
self.assertTrue(np.allclose(gain*out1, out2), msg=
"mean(abs(gain*out1 - out2))={}".format(
np.mean(np.abs(gain*out1 - out2))))
def test_range_no_compress(self):
""" Test range controls when there is no compression. """
tau = 40.0
mrate1 = 50.0
Mrate1 = 100.0
mrate2 = 30.0
Mrate2 = 130.0
tmax = 50.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate1
self.rule.max_rate = Mrate1
self.rule.compress_rates = False
self.motor.error_fct = lambda t: (int_r(t)%2)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.min_rate = mrate2
self.rule.max_rate = Mrate2
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
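# With compression off, changing [min_rate, max_rate] should only remap the
# output affinely, sending mrate1 -> mrate2 and Mrate1 -> Mrate2; expected2
# below is exactly that linear rescaling of the first run's output.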
expected2 = mrate2 + (M1.out - mrate1)*(Mrate2 - mrate2)/(Mrate1 - mrate1)
self.assertTrue(np.allclose(M2.out, expected2), msg=
"mean(abs(out2 - expected2))={}".format(
np.mean(np.abs(M2.out - expected2))))
def test_compress_works(self):
""" Test that compression keeps firing rates from exceeding limits. """
tau = 45.0
mrate = 60.0
Mrate = 100.0
gain = 5
tmax = 50.0
dt = 0.2
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.gain = gain
self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
# make sure we normally go outside the range
self.assertGreater(np.sum(M1.out < mrate), 0)
self.assertGreater(np.sum(M1.out > Mrate), 0)
self.rule.compress_rates = True
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
self.assertEqual(np.sum(M2.out < mrate), 0)
self.assertEqual(np.sum(M2.out > Mrate), 0)
def test_compression_tanh(self):
""" Test that compression performs tanh on uncompressed results. """
tau = 48.0
mrate = 60.0
Mrate = 100.0
gain = 5
tmax = 50.0
dt = 0.2
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.gain = gain
self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.compress_rates = True
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
mavg = 0.5*(mrate + Mrate)
mdiff = 0.5*(Mrate - mrate)
expected = mavg + mdiff*np.tanh((M1.out - mavg)/mdiff)
self.assertTrue(np.allclose(M2.out, expected), msg=
"mean(abs(out - expected))={}".format(np.mean(np.abs(M2.out - expected))))
def test_deconvolve_to_motor_error(self):
""" Test that deconvolving can undo the effect of the memory integral. """
tau = 50.0
mrate = 50.0
Mrate = 100.0
tmax = 50.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.gain = 1
self.rule.tau_deconv1 = tau
self.motor.error_fct = lambda _: np.ones(self.Nsrc)
M = simulation.StateMonitor(self.rule, 'out')
sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)
sim.run(tmax)
# the output should be almost constant
self.assertAlmostEqual(np.std(M.out)/np.mean(M.out), 0)
def test_deconvolve_once_general(self):
""" Test more general deconvolution timescale. """
tau = 50.0
tau_deconv = 20.0
mrate = 50.0
Mrate = 100.0
tmax = 60.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.tau_deconv1 = tau_deconv
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
mavg = (mrate + Mrate)*0.5
mdiff = (Mrate - mrate)*0.5
out1 = (M1.out - mavg)/mdiff
out2 = (M2.out - mavg)/mdiff
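# Deconvolving a single exponential filter with timescale tau_deconv amounts
# to applying (1 + tau_deconv * d/dt) to the filtered signal, so the second
# run should approximately equal out1 + tau_deconv * d(out1)/dt; the time
# derivative is approximated below with forward differences.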
der_out1 = np.diff(out1, axis=1)/dt
expected_out2_crop = out1[:, 1:] + tau_deconv*der_out1
# mismatch is relatively large since we're using Euler's method
# we can't do much better, however, since the motor controller cannot give
# us motor error information at sub-step resolution
mismatch = np.mean(np.abs(expected_out2_crop - out2[:, 1:])/
expected_out2_crop)
self.assertLess(mismatch, 1e-3)
def test_deconvolve_once_symmetric(self):
""" Test that it doesn't matter which tau_deconv is non-zero. """
tau = 50.0
tau_deconv = 20.0
mrate = 50.0
Mrate = 100.0
tmax = 60.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.tau_deconv1 = tau_deconv
self.rule.tau_deconv2 = None
self.motor.error_fct = lambda t: (int_r(t/20.0)%3-1)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.tau_deconv1 = None
self.rule.tau_deconv2 = tau_deconv
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
self.assertTrue(np.allclose(M1.out, M2.out))
def test_deconvolve_second(self):
""" Test deconvolution with two timescales. """
tau = 50.0
tau_deconv1 = 20.0
tau_deconv2 = 35.0
mrate = 50.0
Mrate = 100.0
tmax = 100.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.tau_deconv1 = tau_deconv1
self.rule.tau_deconv2 = None
self.motor.error_fct = lambda t: 2*np.sin(0.123 + t/15.0)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.tau_deconv2 = tau_deconv2
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
mavg = (mrate + Mrate)*0.5
mdiff = (Mrate - mrate)*0.5
out1 = (M1.out - mavg)/mdiff
out2 = (M2.out - mavg)/mdiff
der_out1 = np.diff(out1, axis=1)/dt
expected_out2_crop = out1[:, 1:] + tau_deconv2*der_out1
# mismatch is relatively large since we're using Euler's method
# we can't do much better, however, since the motor controller cannot give
# us motor error information at sub-step resolution
mismatch = np.mean(np.abs(expected_out2_crop - out2[:, 1:])/
expected_out2_crop)
self.assertLess(mismatch, 1e-3)
def test_deconvolve_symmetric(self):
""" Test that deconvolution is symmetric in the two timescales. """
tau = 50.0
tau_deconv1 = 5.0
tau_deconv2 = 20.0
mrate = 50.0
Mrate = 100.0
tmax = 60.0
dt = 0.1
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.tau_deconv1 = tau_deconv1
self.rule.tau_deconv2 = tau_deconv2
self.motor.error_fct = lambda t: 2*np.sin(0.123 + t/15.0)*np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.tau_deconv1 = tau_deconv2
self.rule.tau_deconv2 = tau_deconv1
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
self.assertTrue(np.allclose(M1.out, M2.out))
def test_relaxation_end(self):
""" Test that relaxation is done after time `relaxation/2`. """
tau = 50.0
mrate = 40.0
Mrate = 120.0
tmax = 50.0
dt = 0.1
relaxation = 20.0
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.rule.relaxation = relaxation
self.motor.error_fct = lambda _: np.ones(self.Nsrc)
M = simulation.StateMonitor(self.rule, 'out')
sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)
sim.run(tmax)
mask = (M.t > tmax - relaxation/2)
mavg = 0.5*(mrate + Mrate)
self.assertAlmostEqual(np.mean(np.abs(M.out[:, mask] - mavg)), 0.0)
def test_relaxation_no_change_beginning(self):
""" Test that non-zero `relaxation` doesn't change beginning of run. """
tau = 50.0
mrate = 40.0
Mrate = 120.0
tmax = 50.0
dt = 0.1
relaxation = 20.0
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.motor.error_fct = lambda _: np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.relaxation = relaxation
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
mask = (M1.t < tmax - relaxation)
self.assertAlmostEqual(np.mean(np.abs(M1.out[:, mask] - M2.out[:, mask])),
0.0)
self.assertNotAlmostEqual(np.mean(np.abs(M1.out[:, ~mask] -
M2.out[:, ~mask])), 0.0)
def test_relaxation_smooth_monotonic(self):
""" Test that relaxation is smooth, monotonic, and non-constant. """
tau = 50.0
mrate = 40.0
Mrate = 120.0
tmax = 100.0
dt = 0.1
relaxation = 50.0
self.rule.tau = tau
self.rule.min_rate = mrate
self.rule.max_rate = Mrate
self.rule.compress_rates = False
self.motor.error_fct = lambda _: np.ones(self.Nsrc)
M1 = simulation.StateMonitor(self.rule, 'out')
sim1 = simulation.Simulation(self.source, self.motor, self.rule, M1, dt=dt)
sim1.run(tmax)
self.rule.relaxation = relaxation
M2 = simulation.StateMonitor(self.rule, 'out')
sim2 = simulation.Simulation(self.source, self.motor, self.rule, M2, dt=dt)
sim2.run(tmax)
mask = ((M1.t >= tmax - relaxation) & (M1.t <= tmax - relaxation/2))
self.assertAlmostEqual(np.max(np.std(M1.out[:, mask], axis=0)), 0.0)
self.assertAlmostEqual(np.max(np.std(M2.out[:, mask], axis=0)), 0.0)
inp = np.mean(M1.out[:, mask], axis=0)
out = np.mean(M2.out[:, mask], axis=0)
mavg = 0.5*(mrate + Mrate)
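# The relaxation window is assumed to multiply the output's deviation from the
# midpoint rate by a smooth step that falls from 1 to 0; dividing the deviation
# of the relaxed run by that of the unrelaxed run recovers this step profile,
# which the assertions below require to be smooth, monotonic and in [0, 1].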
step_profile = (out - mavg) / (inp - mavg)
# make sure step is monotonically decreasing, and between 0 and 1
self.assertTrue(np.all(np.diff(step_profile) < 0))
self.assertLessEqual(np.max(step_profile), 1.0)
self.assertGreaterEqual(np.min(step_profile), 0.0)
# make sure the slope isn't *too* big
max_diff = float(dt)/(relaxation / 5.0)
self.assertLess(np.max(-np.diff(step_profile)), max_diff)
##############################
# TestReinforcementTutorRule #
##############################
# helper class
class MockReward(object):
def __init__(self, reward_fct):
self.reward_fct = reward_fct
def prepare(self, t, dt):
self.reward = 0
def evolve(self, t, dt):
self.reward = self.reward_fct(t)
def __call__(self):
return self.reward
# actual tests
class TestReinforcementTutorRule(unittest.TestCase):
def test_direction_no_int(self):
""" Test that rates change in the expected direction (tau = 0). """
tmax = 40.0
dt = 0.1
tutor = SimpleNeurons(2, out_fct=lambda _: [100.0, 60.0])
reward = MockReward(lambda t: 1.0 if t < tmax/2 else -1)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=80.0, learning_rate=0.1,
use_tutor_baseline=False)
sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim.run(tmax)
# tutor_rule's output should be increasing for t < tmax/2, decreasing after
mask = (np.arange(0, tmax, dt) < tmax/2)
self.assertGreater(np.min(tutor_rule.rates[mask, 0]), 80.0)
self.assertLess(np.max(tutor_rule.rates[mask, 1]), 80.0)
self.assertGreater(np.min(tutor_rule.rates[~mask, 1]), 80.0)
self.assertLess(np.max(tutor_rule.rates[~mask, 0]), 80.0)
def test_out_follows_rates(self):
""" Test that rule output follows rates field. """
tmax = 40.0
dt = 0.1
tutor = SimpleNeurons(2, out_fct=lambda _: [100.0, 60.0])
reward = MockReward(lambda _: 0.0)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=80.0, learning_rate=0.1,
use_tutor_baseline=False)
nsteps = int_r(tmax/dt)
tutor_rule.rates = np.zeros((nsteps, 2))
tutor_rule.rates[:, 0] = np.linspace(0, 1, nsteps)
tutor_rule.rates[:, 1] = np.linspace(1, 0, nsteps)
M = simulation.StateMonitor(tutor_rule, 'out')
sim = simulation.Simulation(tutor, reward, tutor_rule, M, dt=dt)
sim.run(tmax)
self.assertLess(np.max(np.abs(M.out[0] - np.linspace(0, 1, nsteps))), 1e-6)
self.assertLess(np.max(np.abs(M.out[1] - np.linspace(1, 0, nsteps))), 1e-6)
def test_prop_learning_rate(self):
""" Test that rates changes are proportional to learning rate. """
tmax = 10.0
dt = 1.0
learning_rate1 = 0.1
learning_rate2 = 0.5
ini_rate = 80.0
tutor = SimpleNeurons(1, out_fct=lambda _: ini_rate+20.0)
reward = MockReward(lambda t: 1.0 if t < tmax/2 else -1)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=ini_rate, learning_rate=learning_rate1,
use_tutor_baseline=False)
sim1 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim1.run(tmax)
drates1 = tutor_rule.rates - ini_rate
tutor_rule.reset_rates()
tutor_rule.learning_rate = learning_rate2
sim2 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim2.run(tmax)
drates2 = tutor_rule.rates - ini_rate
self.assertLess(np.max(np.abs(learning_rate2*drates1 -
learning_rate1*drates2)), 1e-6)
def test_prop_reward(self):
""" Test that rates changes scale linearly with reward. """
tmax = 10.0
dt = 1.0
reward_scale = 5.0
ini_rate = 80.0
tutor = SimpleNeurons(1, out_fct=lambda _: ini_rate+20.0)
reward = MockReward(lambda t: 1.0 if t < tmax/2 else -1)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,
use_tutor_baseline=False)
sim1 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim1.run(tmax)
drates1 = tutor_rule.rates - ini_rate
tutor_rule.reset_rates()
reward.reward_fct = lambda t: reward_scale if t < tmax/2 else -reward_scale
sim2 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim2.run(tmax)
drates2 = tutor_rule.rates - ini_rate
self.assertLess(np.max(np.abs(reward_scale*drates1 - drates2)), 1e-6)
def test_prop_fluctuation(self):
""" Test that rates changes scale linearly with fluctuation size. """
tmax = 10.0
dt = 1.0
ini_rate = 80.0
nsteps = int_r(tmax/dt)
tutor = SimpleNeurons(1, out_fct=lambda i: ini_rate + i*20.0/nsteps - 10.0)
reward = MockReward(lambda _: 1.0)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,
use_tutor_baseline=False)
sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim.run(tmax)
drates = (tutor_rule.rates - ini_rate)[:, 0]
fluctuations = (np.arange(nsteps)*20.0/nsteps - 10.0)
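# With a constant reward of 1 and tau = 0, each step's rate update should be
# proportional to the tutor's fluctuation around ini_rate, so drates should be
# a single constant multiple of `fluctuations`; `ratio` estimates that constant
# from the positive fluctuations and the assertion then checks every entry.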
mask = (fluctuations > 0)
ratio = np.mean(drates[mask] / fluctuations[mask])
self.assertLess(np.max(np.abs(drates - ratio*fluctuations)), 1e-6)
def test_constrain_rates(self):
""" Test that we can keep rates constrained in a given range. """
tmax = 10.0
dt = 1.0
ini_rate = 80.0
min_rate = ini_rate - 5.0
max_rate = ini_rate + 5.0
nsteps = int_r(tmax/dt)
tutor = SimpleNeurons(1, out_fct=lambda i: ini_rate + i*20.0/nsteps - 10.0)
reward = MockReward(lambda _: 1.0)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,
min_rate=min_rate, max_rate=max_rate,
use_tutor_baseline=False)
sim1 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim1.run(tmax)
# rates should exceed limits
self.assertGreater(np.max(tutor_rule.rates), max_rate)
self.assertLess(np.min(tutor_rule.rates), min_rate)
tutor_rule.constrain_rates = True
tutor_rule.reset_rates()
sim2 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim2.run(tmax)
# rates should no longer exceed limits
self.assertLessEqual(np.max(tutor_rule.rates), max_rate)
self.assertGreaterEqual(np.min(tutor_rule.rates), min_rate)
def test_tau(self):
""" Test integrating the reward-fluctuation product over some timescale. """
tau_values = [5.0, 15.0, 25.0]
tmax = 50.0
dt = 0.1
N = 3
ini_rate = 80.0
nsteps = int_r(tmax/dt)
# reproducible arbitrariness
np.random.seed(34342)
tutor_out_trace = ini_rate + 20.0*np.random.randn(nsteps, N)
# have some correlation between reward trace and tutor.out trace
rho = 0.2
reward_trace = (rho*(tutor_out_trace[:, 0] - ini_rate)/20.0 +
(1-rho)*np.random.randn(nsteps))
scaling = None
for crt_tau in tau_values:
tutor = SimpleNeurons(N, out_fct=lambda i: tutor_out_trace[i])
reward = MockReward(lambda t: reward_trace[int_r(t/dt)])
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=crt_tau,
constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,
use_tutor_baseline=False)
sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim.run(tmax)
drates = tutor_rule.rates - ini_rate
# this should be a convolution of tutor_out_trace*reward_trace with an
# exponential with time constant crt_tau
# that means that tau*(d/dt)drates + drates must be proportional to it
expected_rhs = (tutor_out_trace - ini_rate)*np.reshape(reward_trace,
(-1, 1))
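# Discretized with forward Euler: lhs[k] ~ tau*(drates[k] - drates[k-1])/dt
# + drates[k-1], with drates[-1] taken to be 0 (no change has accumulated
# before the run starts), which gives the extra first row below.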
lhs = np.vstack((float(crt_tau)*np.reshape(drates[0, :], (1, -1))/dt,
(crt_tau/dt)*np.diff(drates, axis=0) + drates[:-1, :]))
# allow scaling to be arbitrary, but *independent of tau*
if scaling is None:
mask = (expected_rhs != 0)
scaling = np.mean(lhs[mask]/expected_rhs[mask])
# scaling shouldn't be negative or zero!
self.assertGreater(scaling, 1e-9)
mag = np.mean(np.abs(expected_rhs))
self.assertLess(np.max(np.abs(lhs - scaling*expected_rhs)), 1e-6*mag)
def test_use_tutor_baseline(self):
""" Test using average tut. rates instead of intended ones as reference. """
tmax = 40.0
dt = 1.0
ini_rate = 80.0
nruns = 11
tutor = SimpleNeurons(2, out_fct=lambda _: [100.0, 60.0])
reward = MockReward(lambda t: 1.0 if t < tmax/2 else -1)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=ini_rate, learning_rate=0.1)
tutor_rule.use_tutor_baseline = True
tutor_rule.baseline_n = 5
for i in xrange(nruns):
# we first set the baselines for the two neurons to some values different
# from tutor_rule's ini_rate, and then in the last round, we test how the
# rates change
if i == nruns-1:
tutor.out_fct = lambda _: [80.0, 80.0]
tutor_rule.reset_rates()
sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim.run(tmax)
drates = tutor_rule.rates - ini_rate
# for the first neuron, for t < tmax/2, the current firing rate is below the
# baseline and the reward is positive, so the rates should *decrease*
# for t >= tmax/2, the rates should *increase*
# the opposite should happen for the second neuron
mask = (np.arange(0, tmax, dt) < tmax/2)
self.assertGreater(np.min(drates[mask, 1]), 0)
self.assertLess(np.max(drates[mask, 0]), 0)
self.assertGreater(np.min(drates[~mask, 0]), 0)
self.assertLess(np.max(drates[~mask, 1]), 0)
def test_calculate_tutor_baseline(self):
""" Test calculation of average tutor rates. """
tmax = 40.0
dt = 1.0
ini_rate = 80.0
baseline_n = 5
rate1 = ini_rate + 20.0
rate2 = ini_rate - 10.0
nruns = 10
nsteps = int_r(tmax/dt)
tutor = SimpleNeurons(2, out_fct=lambda i:
[rate1, rate2] if i < nsteps/2 else [rate2, rate1])
reward = MockReward(lambda _: 0.0)
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,
constrain_rates=False, ini_rate=ini_rate, learning_rate=0.1,
use_tutor_baseline=True, baseline_n=baseline_n)
factor = 1 - 1.0/baseline_n
for i in xrange(nruns):
tutor_rule.reset_rates()
sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)
sim.run(tmax)
crt_baseline = tutor_rule.baseline
self.assertEqual(np.ndim(crt_baseline), 2)
self.assertEqual(np.shape(crt_baseline)[0], nsteps)
self.assertEqual(np.shape(crt_baseline)[1], 2)
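# Assuming the baseline is an exponential moving average updated once per run,
# baseline_{k+1} = (1 - 1/baseline_n)*baseline_k + (1/baseline_n)*rate starting
# from ini_rate, its closed form after i+1 runs is
# rate + (ini_rate - rate)*factor**(i+1), which is what expected1/expected2 use.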
expected1 = rate1 + (ini_rate - rate1)*factor**(i+1)
expected2 = rate2 + (ini_rate - rate2)*factor**(i+1)
self.assertLess(np.max(np.abs(crt_baseline[:nsteps/2, 0] - expected1)),
1e-6)
self.assertLess(np.max(np.abs(crt_baseline[nsteps/2:, 0] - expected2)),
1e-6)
self.assertLess(np.max(np.abs(crt_baseline[:nsteps/2, 1] - expected2)),
1e-6)
self.assertLess(np.max(np.abs(crt_baseline[nsteps/2:, 1] - expected1)),
1e-6)
def test_relaxation_end(self):
""" Test that relaxation is done after time `relaxation/2`. """
tau = 50.0
mrate = 40.0
Mrate = 120.0
tmax = 50.0
dt = 0.1
relaxation = 20.0
tutor = SimpleNeurons(2, out_fct=lambda _: Mrate*np.random.rand())
reward = MockReward(lambda t: np.sin(10*t/tmax))
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=tau,
constrain_rates=True, min_rate=mrate, max_rate=Mrate,
learning_rate=0.1, relaxation=relaxation, use_tutor_baseline=False)
# reproducible arbitrariness
np.random.seed(1)
M = simulation.StateMonitor(tutor_rule, 'out')
sim = simulation.Simulation(tutor, reward, tutor_rule, M, dt=dt)
sim.run(tmax)
mask = (M.t > tmax - relaxation/2)
mavg = 0.5*(mrate + Mrate)
self.assertAlmostEqual(np.mean(np.abs(M.out[:, mask] - mavg)), 0.0)
def test_relaxation_no_change_beginning(self):
""" Test that non-zero `relaxation` doesn't change beginning of run. """
tau = 25.0
mrate = 40.0
Mrate = 120.0
tmax = 50.0
dt = 0.1
relaxation = 20.0
tutor = SimpleNeurons(2, out_fct=lambda _: Mrate*np.random.rand())
reward = MockReward(lambda t: np.sin(8*t/tmax))
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=tau,
constrain_rates=True, min_rate=mrate, max_rate=Mrate,
learning_rate=0.1, relaxation=None, use_tutor_baseline=False)
# reproducible arbitrariness
np.random.seed(12)
M1 = simulation.StateMonitor(tutor_rule, 'out')
sim1 = simulation.Simulation(tutor, reward, tutor_rule, M1, dt=dt)
sim1.run(tmax)
# now run again with relaxation enabled
tutor_rule.relaxation = relaxation
tutor_rule.reset_rates()
np.random.seed(12)
M2 = simulation.StateMonitor(tutor_rule, 'out')
sim2 = simulation.Simulation(tutor, reward, tutor_rule, M2, dt=dt)
sim2.run(tmax)
mask = (M1.t < tmax - relaxation)
self.assertAlmostEqual(np.mean(np.abs(M1.out[:, mask] - M2.out[:, mask])),
0.0)
self.assertNotAlmostEqual(np.mean(np.abs(M1.out[:, ~mask] -
M2.out[:, ~mask])), 0.0)
def test_relaxation_smooth_monotonic(self):
""" Test that relaxation is smooth, monotonic, and non-constant. """
tau = 45.0
mrate = 40.0
Mrate = 120.0
tmax = 50.0
dt = 0.1
relaxation = 20.0
tutor = SimpleNeurons(2, out_fct=lambda _: Mrate*np.random.rand())
reward = MockReward(lambda t: np.sin(9*t/tmax))
tutor_rule = ReinforcementTutorRule(tutor, reward, tau=tau,
constrain_rates=True, min_rate=mrate, max_rate=Mrate,
learning_rate=0.1, relaxation=None, use_tutor_baseline=False)
# reproducible arbitrariness
np.random.seed(123)
M1 = simulation.StateMonitor(tutor_rule, 'out')
sim1 = simulation.Simulation(tutor, reward, tutor_rule, M1, dt=dt)
sim1.run(tmax)
# now run again with relaxation enabled
tutor_rule.relaxation = relaxation
tutor_rule.reset_rates()
np.random.seed(123)
M2 = simulation.StateMonitor(tutor_rule, 'out')
sim2 = simulation.Simulation(tutor, reward, tutor_rule, M2, dt=dt)
sim2.run(tmax)
mask = ((M1.t >= tmax - relaxation) & (M1.t <= tmax - relaxation/2))
mavg = 0.5*(mrate + Mrate)
ratio = (M2.out[:, mask] - mavg)/(M1.out[:, mask] - mavg)
self.assertAlmostEqual(np.max(np.std(ratio, axis=0)), 0.0)
step_profile = np.mean(ratio, axis=0)
# make sure step is monotonically decreasing, and between 0 and 1
self.assertTrue(np.all(np.diff(step_profile) < 0))
self.assertLessEqual(np.max(step_profile), 1.0)
self.assertGreaterEqual(np.min(step_profile), 0.0)
# make sure the slope isn't *too* big
max_diff = float(dt)/(relaxation / 5.0)
self.assertLess(np.max(-np.diff(step_profile)), max_diff)
##################
# TestRateLayer #
##################
class TestRateLayer(unittest.TestCase):
def test_linear(self):
""" Test layer in linear regime. """
G1 = SimpleNeurons(3)
G2 = SimpleNeurons(2)
G1pattern = np.asarray(
[[ 0, 1, 0, 2],
[-1, 1, 0, 1],
[ 1,-1,-1,-1]])
G2pattern = np.asarray(
[[0, 1, 4, 0],
[1, -1.0/3, -1.0/3, -2.0/3]])
G1.out_fct = lambda i: G1pattern[:, i]
G2.out_fct = lambda i: G2pattern[:, i]
G = RateLayer(2)
G.add_source(G1)
G.add_source(G2)
G.Ws[0] = np.asarray(
[[1, 2, 3],
[1,-2, 1]])
G.Ws[1] = np.asarray([1, -3])
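# Ws[0] is a full 2x3 weight matrix, while the 1-D Ws[1] appears to act as a
# per-neuron scalar weight applied elementwise to the second source (inferred
# from the expected outputs). E.g. at step 0: Ws[0].dot([0, -1, 1]) = [1, 3]
# and [1, -3]*[0, 1] = [0, -3], giving out = [1, 0], the first column asserted
# below.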
M = simulation.StateMonitor(G, 'out')
dt = 1.0
nsteps = 4
tmax = nsteps*dt
sim = simulation.Simulation(G1, G2, G, M, dt=dt)
sim.run(tmax)
self.assertTrue(np.allclose(M.out[0, :], [1, 1, 1, 1]))
self.assertTrue(np.allclose(M.out[1, :], [0, -1, 0, 1]))
def test_nonlinearity(self):
""" Test application of nonlinearity. """
# reproducible arbitrariness
np.random.seed(1)
N1 = 5
N2 = 4
N = 3
dt = 1.0
nsteps = 10
tmax = nsteps*dt
nonlin = lambda v: np.tanh(v)
G1 = SimpleNeurons(N1)
G2 = SimpleNeurons(N2)
G1pattern = 1 + 2*np.random.randn(N1, nsteps)
G2pattern = -1 + np.random.randn(N2, nsteps)
G1.out_fct = lambda i: G1pattern[:, i]
G2.out_fct = lambda i: G2pattern[:, i]
G = RateLayer(N)
G.add_source(G1)
G.add_source(G2)
G.Ws[0] = np.random.randn(N, N1)
G.Ws[1] = 1 + 3*np.random.randn(N, N2)
M1 = simulation.StateMonitor(G, 'out')
sim1 = simulation.Simulation(G1, G2, G, M1, dt=dt)
sim1.run(tmax)
# test that the run isn't trivial
self.assertGreater(np.mean(np.abs(M1.out)), 1e-3)
# now run again with nonlinearity
G.nonlinearity = nonlin
M2 = simulation.StateMonitor(G, 'out')
sim2 = simulation.Simulation(G1, G2, G, M2, dt=dt)
sim2.run(tmax)
self.assertTrue(np.allclose(M2.out, nonlin(M1.out)))
def test_bias(self):
""" Test neuron bias. """
N = 4
G = RateLayer(N)
bias = [1.0, 0.0, -1.0, -2.0]
G.bias = np.array(bias)
M = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, M, dt=1.0)
sim.run(sim.dt)
self.assertTrue(np.allclose(M.out.ravel(), bias))
####################
# TestRateHVCLayer #
####################
class TestRateHVCLayer(unittest.TestCase):
def test_jitter(self):
""" Test that there are differences in output between trials. """
# some reproducible arbitrariness
np.random.seed(343143)
n = 25
t_max = 50
dt = 0.1
G = RateHVCLayer(n)
M1 = simulation.StateMonitor(G, 'out')
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
M2 = simulation.StateMonitor(G, 'out')
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(t_max)
self.assertGreater(np.max(np.abs(M1.out - M2.out)), 0.99)
def test_no_jitter(self):
""" Test that repeated noiseless trials are identical. """
n = 10
t_max = 25
dt = 0.1
G = RateHVCLayer(n)
G.burst_noise = 0.0
M1 = simulation.StateMonitor(G, 'out')
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
M2 = simulation.StateMonitor(G, 'out')
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(t_max)
self.assertTrue(np.allclose(M1.out, M2.out))
def test_uniform(self):
""" Test that there are bursts all along the simulation window. """
# some reproducible arbitrariness
np.random.seed(87548)
n = 50
t_max = 50
dt = 0.1
resolution = 1.0
class UniformityChecker(object):
def __init__(self, target, resolution):
self.target = target
self.resolution = resolution
self.order = 1
def prepare(self, t_max, dt):
self.has_spike = np.zeros(int_r(t_max/self.resolution) + 1)
def evolve(self, t, dt):
i = int_r(t/self.resolution)
self.has_spike[i] = (self.has_spike[i] or np.any(self.target.out > 0))
G = RateHVCLayer(n)
M = UniformityChecker(G, resolution)
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
self.assertTrue(np.all(M.has_spike))
def test_burst(self):
""" Test that each neuron fires a burst of given width. """
n = 25
t_max = 50
dt = 0.1
G = RateHVCLayer(n)
G.burst_noise = 0.0
M = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
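# With burst noise disabled, the n bursts are assumed to tile [0, t_max]
# uniformly, so each burst should last roughly t_max/n (up to one time step),
# which is what the assertions below check.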
# find burst lengths for each neuron
nonzero_len = lambda v: max((v>0).nonzero()[0]) - min((v>0).nonzero()[0])
burst_lengths = [dt*nonzero_len(M.out[i]) for i in xrange(n)]
self.assertLess(np.std(burst_lengths), dt/2)
self.assertLess(np.abs(np.mean(burst_lengths) - float(t_max)/n),
(1 + 1e-6)*dt)
def test_burst_dispersion(self):
""" Test that starting times of bursts are within required bounds. """
# some reproducible arbitrariness
np.random.seed(7342642)
n = 25
t_max = 50
dt = 0.1
n_sim = 10
G = RateHVCLayer(n)
burst_starts = []
for i in xrange(n_sim):
M = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
burst_starts.append([dt*min((M.out[i] > 0).nonzero()[0])
for i in xrange(n)])
burst_starts_range = [np.ptp([_[i] for _ in burst_starts])
for i in xrange(n)]
self.assertLess(np.max(burst_starts_range), G.burst_noise + dt/2)
def test_burst_tmax(self):
""" Test using a different end time for bursts than for simulation. """
n = 10
t_max = 25
dt = 0.1
G = RateHVCLayer(n)
G.burst_noise = 0.0
M1 = simulation.StateMonitor(G, 'out')
sim1 = simulation.Simulation(G, M1, dt=dt)
sim1.run(t_max)
G = RateHVCLayer(n, burst_tmax=t_max)
G.burst_noise = 0.0
M2 = simulation.StateMonitor(G, 'out')
sim2 = simulation.Simulation(G, M2, dt=dt)
sim2.run(2*t_max)
self.assertTrue(np.allclose(M1.out, M2.out[:, :M1.out.shape[1]]))
def test_custom_length(self):
""" Test custom burst length. """
n = 25
t_max = 50
dt = 0.1
burst_length = 10.0
G = RateHVCLayer(n, burst_length=burst_length)
G.burst_noise = 0.0
M = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, M, dt=dt)
sim.run(t_max)
# find burst lengths for each neuron
nonzero_len = lambda v: max((v>0).nonzero()[0]) - min((v>0).nonzero()[0])
burst_lengths = [dt*nonzero_len(M.out[i]) for i in xrange(n)]
self.assertLess(np.std(burst_lengths), dt/2)
self.assertLess(np.abs(np.mean(burst_lengths) - burst_length),
(1 + 1e-6)*dt)
####################
# TestTableLayer #
####################
class TestTableLayer(unittest.TestCase):
def test_output(self):
""" Test that the output matches the table provided. """
# reproducible arbitrariness
np.random.seed(123423)
N = 20
tmax = 30.0
dt = 1.0
n_steps = int_r(tmax/dt)
table = np.random.randn(N, n_steps)
G = TableLayer(table)
M = simulation.StateMonitor(G, 'out')
sim = simulation.Simulation(G, M, dt=dt)
sim.run(tmax)
self.assertLess(np.max(np.abs(M.out - table)), 1e-6)
####################
# TestConnector #
####################
class TestConnector(unittest.TestCase):
def test_transfer(self):
""" Test transfer of data using connector. """
class MockSender(object):
def evolve(self, t, dt):
self.x = t**2 + t + 1
class MockReceiver(object):
def __init__(self):
self.rec = None
def evolve(self, t, dt):
pass
S = MockSender()
R = MockReceiver()
C = Connector(S, 'x', R, 'rec', order=0.5)
tmax = 10.0
dt = 0.1
M = simulation.StateMonitor(R, 'rec')
sim = simulation.Simulation(S, R, C, M, dt=dt)
sim.run(tmax)
expected = (M.t**2 + M.t + 1)
self.assertAlmostEqual(np.mean(np.abs(expected - M.rec[0])), 0.0)
if __name__ == '__main__':
unittest.main()
|
ttesileanu/twostagelearning
|
tests.py
|
Python
|
mit
| 99,178
|
[
"NEURON"
] |
1070dfbb2a34a06ee0933e5c025993aa4b3bf1271e01a8a36e63e8b9422e1912
|
"""
Page objects for interacting with the test site.
"""
import os
import time
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from bok_choy.javascript import js_defined, requirejs, wait_for_js
class SitePage(PageObject):
"""
Base class for all pages in the test site.
"""
# Get the server port from the environment
# (set by the test runner script)
SERVER_PORT = os.environ.get("SERVER_PORT", 8005)
def is_browser_on_page(self):
title = self.name.lower().replace('_', ' ')
return title in self.browser.title.lower()
@property
def url(self):
return "http://localhost:{}/{}".format(self.SERVER_PORT, self.name + ".html")
@property
def output(self):
"""
Return the contents of the "#output" div on the page.
The fixtures are configured to update this div when the user
interacts with the page.
"""
text_list = self.q(css='#output').text
if len(text_list) < 1:
return None
return text_list[0]
class ButtonPage(SitePage):
"""
Page for testing button interactions.
"""
name = "button"
def click_button(self):
"""
Click the button on the page, which should cause the JavaScript
to update the #output div.
"""
self.q(css='div#fixture input').first.click()
class TextFieldPage(SitePage):
"""
Page for testing text field interactions.
"""
name = "text_field"
def enter_text(self, text):
"""
Input `text` into the text field on the page.
"""
self.q(css='#fixture input').fill(text)
class SelectPage(SitePage):
"""
Page for testing select input interactions.
"""
name = "select"
def select_car(self, car_value):
"""
Select the car with ``car_value`` in the drop-down list.
"""
self.q(css=f'select[name="cars"] option[value="{car_value}"]').first.click()
def is_car_selected(self, car):
"""
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
"""
return self.q(css=f'select[name="cars"] option[value="{car}"]').selected
class CheckboxPage(SitePage):
"""
Page for testing checkbox interactions.
"""
name = "checkbox"
def toggle_pill(self, pill_name):
"""
Toggle the box for the pill with `pill_name` (red or blue).
"""
self.q(css=f"#fixture input#{pill_name}").first.click()
class AlertPage(SitePage):
"""
Page for testing alert handling.
"""
name = "alert"
def confirm(self):
"""
Click the ``Confirm`` button and confirm the dialog.
"""
with self.handle_alert(confirm=True):
self.q(css='button#confirm').first.click()
def cancel(self):
"""
Click the ``Confirm`` button and cancel the dialog.
"""
with self.handle_alert(confirm=False):
self.q(css='button#confirm').first.click()
def dismiss(self):
"""
Click the ``Alert`` button and confirm the alert.
"""
with self.handle_alert():
self.q(css='button#alert').first.click()
class SelectorPage(SitePage):
"""
Page for testing retrieval of information by CSS selectors.
"""
name = "selector"
@property
def num_divs(self):
"""
Count the number of div.test elements.
"""
return len(self.q(css='div.test').results)
@property
def div_text_list(self):
"""
Return list of text for each div.test element.
"""
return self.q(css='div.test').text
@property
def div_value_list(self):
"""
Return list of values for each div.test element.
"""
return self.q(css='div.test').attrs('value')
@property
def div_html_list(self):
"""
Return list of html for each div.test element.
"""
return self.q(css='div.test').html
def ids_of_outer_divs_with_inner_text(self, child_text):
"""
Return a list of the ids of outer divs with
the specified text in a child element.
"""
return self.q(css='div.outer').filter(
lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')]
).attrs('id')
class DelayPage(SitePage):
"""
Page for testing elements that appear after a delay.
"""
name = "delay"
def trigger_output(self):
"""
Wait for click handlers to be installed,
then click a button and retrieve the output that appears
after a delay.
"""
EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
self.q(css='div#fixture button').first.click()
EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
def make_broken_promise(self):
"""
Make a promise that will not be fulfilled.
Should raise a `BrokenPromise` exception.
"""
return EmptyPromise(
self.q(css='div#not_present').is_present, "Invalid div appeared",
try_limit=3, try_interval=0.01
).fulfill()
class SlowPage(SitePage):
"""
Page that loads its elements slowly.
"""
name = "slow"
def is_browser_on_page(self):
return self.q(css='div#ready').is_present()
class NextPage(SitePage):
"""
Page that loads another page after a delay.
"""
name = "next_page"
def is_browser_on_page(self):
return self.q(css='#next').is_present()
def load_next(self, page, delay_sec):
"""
Load the given `page` after waiting for `delay_sec` seconds.
"""
time.sleep(delay_sec)
page.visit()
@js_defined('$')
class FocusedPage(SitePage):
"""
Page that has a link to a focusable element.
"""
name = "focused"
@wait_for_js
def focus_on_main_content(self):
"""
Give focus to the element with the ``main-content`` ID.
"""
self.browser.execute_script("$('#main-content').focus()")
class VisiblePage(SitePage):
"""
Page that has some elements visible and others invisible.
"""
name = "visible"
def is_visible(self, name):
"""
Return a boolean indicating whether the given item is visible.
"""
return self.q(css=f"div.{name}").first.visible
def is_invisible(self, name):
"""
Return a boolean indicating whether the given element is present, but not visible.
"""
return self.q(css=f"div.{name}").first.invisible
@js_defined('test_var1', 'test_var2')
class JavaScriptPage(SitePage):
"""
Page for testing asynchronous JavaScript.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@wait_for_js
def reload_and_trigger_output(self):
"""
Reload the page, wait for JS, then trigger the output.
"""
self.browser.refresh()
self.wait_for_js() # pylint: disable=no-member
self.q(css='div#fixture button').first.click()
@js_defined('something.SomethingThatDoesntExist')
class JavaScriptUndefinedPage(SitePage):
"""
Page for testing asynchronous JavaScript, where the
javascript that we wait for is never defined.
"""
name = "javascript"
@wait_for_js
def trigger_output(self):
"""
Click a button which will only work once RequireJS finishes loading.
"""
self.q(css='div#fixture button').first.click()
@requirejs('main')
class RequireJSPage(SitePage):
"""
Page for testing asynchronous JavaScript loaded with RequireJS.
"""
name = "requirejs"
@property
@wait_for_js
def output(self):
"""
Wait for scripts to finish and then return the contents of the
``#output`` div on the page.
"""
return super().output
class AjaxNoJQueryPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax_no_jquery"
class AjaxPage(SitePage):
"""
Page for testing an ajax call.
"""
name = "ajax"
def click_button(self):
"""
Click the button on the page, which triggers an ajax
call that updates the #output div.
"""
self.q(css='div#fixture button').first.click()
class WaitsPage(SitePage):
"""
Page for testing wait helpers.
"""
name = "wait"
def is_button_output_present(self):
"""
Click the button and wait until the output div is present in the DOM.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_presence('div#output', 'Button Output is Available')
def is_class_absent(self):
"""
Click the button and wait until the 'playing' class disappears from the DOM.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_absence('.playing', 'Animation Stopped')
def is_button_output_visible(self):
"""
Click the button and wait until the output div is displayed.
"""
self.wait_for_element_presence('div#ready', 'Page is Ready')
self.q(css='div#fixture button').first.click()
self.wait_for_element_visibility('div#output', 'Button Output is Visible')
def is_spinner_invisible(self):
"""
Click the button and wait until the spinner animation disappears.
"""
self.q(css='#spinner').first.click()
self.wait_for_element_invisibility('#anim', 'Button Output is Visible')
class AccessibilityPage(SitePage):
"""
Page for testing accessibility auditing.
"""
name = "accessibility"
class ImagePage(SitePage):
"""
Page for testing image capture and comparison.
"""
name = "image"
class LongPage(SitePage):
"""
Page that requires scrolling to get to certain elements.
"""
name = "long_page"
|
edx/bok-choy
|
tests/pages.py
|
Python
|
apache-2.0
| 10,245
|
[
"VisIt"
] |
dcc1d02e5e58d093fed5919dd7d9d6d377a5031bc507930b6004923dd635ac02
|
from __future__ import division
__all__ = \
['Gaussian', 'GaussianFixedMean', 'GaussianFixedCov', 'GaussianFixed',
'GaussianNonConj', 'DiagonalGaussian', 'DiagonalGaussianNonconjNIG',
'IsotropicGaussian', 'ScalarGaussianNIX', 'ScalarGaussianNonconjNIX',
'ScalarGaussianNonconjNIG', 'ScalarGaussianFixedvar']
import numpy as np
from numpy import newaxis as na
from numpy.core.umath_tests import inner1d
import scipy.linalg
import scipy.stats as stats
import scipy.special as special
import copy
from pybasicbayes.abstractions import GibbsSampling, MeanField, \
MeanFieldSVI, Collapsed, MaxLikelihood, MAP, Tempering
from pybasicbayes.distributions.meta import _FixedParamsMixin
from pybasicbayes.util.stats import sample_niw, invwishart_entropy, \
sample_invwishart, invwishart_log_partitionfunction, \
getdatasize, flattendata, getdatadimension, \
combinedata, multivariate_t_loglik, gi
weps = 1e-12
class _GaussianBase(object):
@property
def params(self):
return dict(mu=self.mu, sigma=self.sigma)
@property
def D(self):
return self.mu.shape[0]
### internals
def getsigma(self):
return self._sigma
def setsigma(self,sigma):
self._sigma = sigma
self._sigma_chol = None
sigma = property(getsigma,setsigma)
@property
def sigma_chol(self):
if not hasattr(self,'_sigma_chol') or self._sigma_chol is None:
self._sigma_chol = np.linalg.cholesky(self.sigma)
return self._sigma_chol
### distribution stuff
def rvs(self,size=None):
size = 1 if size is None else size
size = size + (self.mu.shape[0],) if isinstance(size,tuple) \
else (size,self.mu.shape[0])
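# If z ~ N(0, I), then mu + z.dot(L.T) ~ N(mu, L.dot(L.T)) = N(mu, Sigma),
# with L the lower-triangular Cholesky factor of Sigma.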
return self.mu + np.random.normal(size=size).dot(self.sigma_chol.T)
def log_likelihood(self,x):
try:
mu, D = self.mu, self.D
sigma_chol = self.sigma_chol
bads = np.isnan(np.atleast_2d(x)).any(axis=1)
x = np.nan_to_num(x).reshape((-1,D)) - mu
xs = scipy.linalg.solve_triangular(sigma_chol,x.T,lower=True)
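# Standard multivariate normal log-density via the Cholesky factor L:
#   log N(x | mu, Sigma) = -0.5*||L^{-1}(x - mu)||^2 - (D/2)*log(2*pi)
#                          - sum(log(diag(L)))
# since log|Sigma| = 2*sum(log(diag(L))).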
out = -1./2. * inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi) \
- np.log(sigma_chol.diagonal()).sum()
out[bads] = 0
return out
except np.linalg.LinAlgError:
# NOTE: degenerate distribution doesn't have a density
return np.repeat(-np.inf,x.shape[0])
### plotting
# TODO making animations, this seems to generate an extra notebook figure
_scatterplot = None
_parameterplot = None
def plot(self,ax=None,data=None,indices=None,color='b',
plot_params=True,label='',alpha=1.,
update=False,draw=True):
import matplotlib.pyplot as plt
from pybasicbayes.util.plot import project_data, \
plot_gaussian_projection, plot_gaussian_2D
ax = ax if ax else plt.gca()
D = self.D
if data is not None:
data = flattendata(data)
if data is not None:
if D > 2:
plot_basis = np.random.RandomState(seed=0).randn(2,D)
data = project_data(data,plot_basis)
if update and self._scatterplot is not None:
self._scatterplot.set_offsets(data)
self._scatterplot.set_color(color)
else:
self._scatterplot = ax.scatter(
data[:,0],data[:,1],marker='.',color=color)
if plot_params:
if D > 2:
plot_basis = np.random.RandomState(seed=0).randn(2,D)
self._parameterplot = \
plot_gaussian_projection(
self.mu,self.sigma,plot_basis,
color=color,label=label,alpha=min(1-1e-3,alpha),
ax=ax, artists=self._parameterplot if update else None)
else:
self._parameterplot = \
plot_gaussian_2D(
self.mu,self.sigma,color=color,label=label,
alpha=min(1-1e-3,alpha), ax=ax,
artists=self._parameterplot if update else None)
if draw:
plt.draw()
return [self._scatterplot] + list(self._parameterplot)
def to_json_dict(self):
D = self.mu.shape[0]
assert D == 2
U,s,_ = np.linalg.svd(self.sigma)
U /= np.linalg.det(U)
theta = np.arctan2(U[0,0],U[0,1])*180/np.pi
return {'x':self.mu[0],'y':self.mu[1],'rx':np.sqrt(s[0]),
'ry':np.sqrt(s[1]), 'theta':theta}
class Gaussian(
_GaussianBase, GibbsSampling, MeanField, MeanFieldSVI,
Collapsed, MAP, MaxLikelihood):
'''
Multivariate Gaussian distribution class.
NOTE: Only works for 2 or more dimensions. For a scalar Gaussian, use a
scalar class. Uses a conjugate Normal/Inverse-Wishart prior.
Hyperparameters mostly follow Gelman et al.'s notation in Bayesian Data
Analysis:
nu_0, sigma_0, mu_0, kappa_0
Parameters are mean and covariance matrix:
mu, sigma
'''
def __init__(
self, mu=None, sigma=None,
mu_0=None, sigma_0=None, kappa_0=None, nu_0=None):
self.mu = mu
self.sigma = sigma
self.mu_0 = self.mu_mf = mu_0
self.sigma_0 = self.sigma_mf = sigma_0
self.kappa_0 = self.kappa_mf = kappa_0
self.nu_0 = self.nu_mf = nu_0
# NOTE: resampling will set mu_mf and sigma_mf if necessary
if mu is sigma is None \
and not any(_ is None for _ in (mu_0,sigma_0,kappa_0,nu_0)):
self.resample() # initialize from prior
if mu is not None and sigma is not None \
and not any(_ is None for _ in (mu_0,sigma_0,kappa_0,nu_0)):
self.mu_mf = mu
self.sigma_mf = sigma * (self.nu_0 - self.mu_mf.shape[0] - 1)
@property
def hypparams(self):
return dict(
mu_0=self.mu_0,sigma_0=self.sigma_0,
kappa_0=self.kappa_0,nu_0=self.nu_0)
@property
def natural_hypparam(self):
return self._standard_to_natural(
self.mu_0,self.sigma_0,self.kappa_0,self.nu_0)
@natural_hypparam.setter
def natural_hypparam(self,natparam):
self.mu_0, self.sigma_0, self.kappa_0, self.nu_0 = \
self._natural_to_standard(natparam)
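# The NIW hyperparameters are packed into a single (D+2)x(D+2) matrix,
#   [[ sigma + kappa*mu*mu^T , kappa*mu , 0          ],
#    [ kappa*mu^T            , kappa    , 0          ],
#    [ 0                     , 0        , nu + 2 + D ]],
# so that the sufficient statistics returned by _get_statistics can simply be
# added to the natural hyperparameters.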
def _standard_to_natural(self,mu_mf,sigma_mf,kappa_mf,nu_mf):
D = sigma_mf.shape[0]
out = np.zeros((D+2,D+2))
out[:D,:D] = sigma_mf + kappa_mf * np.outer(mu_mf,mu_mf)
out[:D,-2] = out[-2,:D] = kappa_mf * mu_mf
out[-2,-2] = kappa_mf
out[-1,-1] = nu_mf + 2 + D
return out
def _natural_to_standard(self,natparam):
D = natparam.shape[0]-2
A = natparam[:D,:D]
b = natparam[:D,-2]
c = natparam[-2,-2]
d = natparam[-1,-1]
return b/c, A - np.outer(b,b)/c, c, d - 2 - D
@property
def num_parameters(self):
D = self.D
return D*(D+1)/2
@property
def D(self):
if self.mu is not None:
return self.mu.shape[0]
elif self.mu_0 is not None:
return self.mu_0.shape[0]
def _get_statistics(self,data,D=None):
if D is None:
D = self.D if self.D is not None else getdatadimension(data)
out = np.zeros((D+2,D+2))
if isinstance(data,np.ndarray):
out[:D,:D] = data.T.dot(data)
out[-2,:D] = out[:D,-2] = data.sum(0)
out[-2,-2] = out[-1,-1] = data.shape[0]
return out
else:
return sum(map(self._get_statistics,data),out)
def _get_weighted_statistics(self,data,weights,D=None):
D = getdatadimension(data) if D is None else D
out = np.zeros((D+2,D+2))
if isinstance(data,np.ndarray):
out[:D,:D] = data.T.dot(weights[:,na]*data)
out[-2,:D] = out[:D,-2] = weights.dot(data)
out[-2,-2] = out[-1,-1] = weights.sum()
return out
else:
return sum(map(self._get_weighted_statistics,data,weights),out)
def _get_empty_statistics(self, D):
out = np.zeros((D+2,D+2))
return out
def empirical_bayes(self,data):
self.natural_hypparam = self._get_statistics(data)
self.resample() # initialize from prior given new hyperparameters
return self
### Gibbs sampling
def resample(self,data=[]):
D = len(self.mu_0)
self.mu, self.sigma = \
sample_niw(*self._natural_to_standard(
self.natural_hypparam + self._get_statistics(data,D)))
# NOTE: next lines let Gibbs sampling initialize mean
nu = self.nu_mf if hasattr(self,'nu_mf') and self.nu_mf \
else self.nu_0
self.mu_mf, self._sigma_mf = self.mu, self.sigma * (nu - D - 1)
return self
def copy_sample(self):
new = copy.copy(self)
new.mu = self.mu.copy()
new.sigma = self.sigma.copy()
return new
### Mean Field
def _resample_from_mf(self):
self.mu, self.sigma = \
sample_niw(*self._natural_to_standard(
self.mf_natural_hypparam))
return self
def meanfieldupdate(self,data,weights):
D = len(self.mu_0)
self.mf_natural_hypparam = \
self.natural_hypparam + self._get_weighted_statistics(
data, weights, D)
def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
D = len(self.mu_0)
self.mf_natural_hypparam = \
(1-stepsize) * self.mf_natural_hypparam + stepsize * (
self.natural_hypparam
+ 1./minibatchfrac
* self._get_weighted_statistics(data,weights,D))
@property
def mf_natural_hypparam(self):
return self._standard_to_natural(
self.mu_mf,self.sigma_mf,self.kappa_mf,self.nu_mf)
@mf_natural_hypparam.setter
def mf_natural_hypparam(self,natparam):
self.mu_mf, self.sigma_mf, self.kappa_mf, self.nu_mf = \
self._natural_to_standard(natparam)
# NOTE: next line is for plotting
self.mu, self.sigma = \
self.mu_mf, self.sigma_mf/(self.nu_mf - self.mu_mf.shape[0] - 1)
@property
def sigma_mf(self):
return self._sigma_mf
@sigma_mf.setter
def sigma_mf(self,val):
self._sigma_mf = val
self._sigma_mf_chol = None
@property
def sigma_mf_chol(self):
if self._sigma_mf_chol is None:
self._sigma_mf_chol = np.linalg.cholesky(self.sigma_mf)
return self._sigma_mf_chol
def get_vlb(self):
D = len(self.mu_0)
loglmbdatilde = self._loglmbdatilde()
# see Eq. 10.77 in Bishop
q_entropy = -0.5 * (loglmbdatilde + D * (np.log(self.kappa_mf/(2*np.pi))-1)) \
+ invwishart_entropy(self.sigma_mf,self.nu_mf)
# see Eq. 10.74 in Bishop, we aren't summing over K
p_avgengy = 0.5 * (D * np.log(self.kappa_0/(2*np.pi)) + loglmbdatilde
- D*self.kappa_0/self.kappa_mf - self.kappa_0*self.nu_mf*
np.dot(self.mu_mf -
self.mu_0,np.linalg.solve(self.sigma_mf,self.mu_mf - self.mu_0))) \
+ invwishart_log_partitionfunction(self.sigma_0,self.nu_0) \
+ (self.nu_0 - D - 1)/2*loglmbdatilde - 1/2*self.nu_mf \
* np.linalg.solve(self.sigma_mf,self.sigma_0).trace()
return p_avgengy + q_entropy
def expected_log_likelihood(self,x):
mu_n, kappa_n, nu_n = self.mu_mf, self.kappa_mf, self.nu_mf
D = len(mu_n)
x = np.reshape(x,(-1,D)) - mu_n # x is now centered
xs = np.linalg.solve(self.sigma_mf_chol,x.T)
# see Eqs. 10.64, 10.67, and 10.71 in Bishop
return self._loglmbdatilde()/2 - D/(2*kappa_n) - nu_n/2 * \
inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi)
def _loglmbdatilde(self):
# see Eq. 10.65 in Bishop
D = len(self.mu_0)
chol = self.sigma_mf_chol
return special.digamma((self.nu_mf-np.arange(D))/2.).sum() \
+ D*np.log(2) - 2*np.log(chol.diagonal()).sum()
### Collapsed
def log_marginal_likelihood(self,data):
n, D = getdatasize(data), len(self.mu_0)
return self._log_partition_function(
*self._natural_to_standard(
self.natural_hypparam + self._get_statistics(data,D))) \
- self._log_partition_function(self.mu_0,self.sigma_0,self.kappa_0,self.nu_0) \
- n*D/2 * np.log(2*np.pi)
def _log_partition_function(self,mu,sigma,kappa,nu):
D = len(mu)
chol = np.linalg.cholesky(sigma)
return nu*D/2*np.log(2) + special.multigammaln(nu/2,D) + D/2*np.log(2*np.pi/kappa) \
- nu*np.log(chol.diagonal()).sum()
def log_predictive_studentt_datapoints(self,datapoints,olddata):
D = len(self.mu_0)
mu_n, sigma_n, kappa_n, nu_n = \
self._natural_to_standard(
self.natural_hypparam + self._get_statistics(olddata,D))
return multivariate_t_loglik(
datapoints,nu_n-D+1,mu_n,(kappa_n+1)/(kappa_n*(nu_n-D+1))*sigma_n)
def log_predictive_studentt(self,newdata,olddata):
newdata = np.atleast_2d(newdata)
return sum(self.log_predictive_studentt_datapoints(
d,combinedata((olddata,newdata[:i])))[0] for i,d in enumerate(newdata))
### Max likelihood
def max_likelihood(self,data,weights=None):
D = getdatadimension(data)
if weights is None:
statmat = self._get_statistics(data,D)
else:
statmat = self._get_weighted_statistics(data,weights,D)
n, x, xxt = statmat[-1,-1], statmat[-2,:D], statmat[:D,:D]
# this SVD is necessary to check if the max likelihood solution is
# degenerate, which can happen in the EM algorithm
if n < D or (np.linalg.svd(xxt,compute_uv=False) > 1e-6).sum() < D:
self.broken = True
self.mu = 99999999*np.ones(D)
self.sigma = np.eye(D)
else:
self.mu = x/n
self.sigma = xxt/n - np.outer(self.mu,self.mu)
return self
def MAP(self,data,weights=None):
D = getdatadimension(data)
# max likelihood with prior pseudocounts included in data
if weights is None:
statmat = self._get_statistics(data)
else:
statmat = self._get_weighted_statistics(data,weights)
statmat += self.natural_hypparam
n, x, xxt = statmat[-1,-1], statmat[-2,:D], statmat[:D,:D]
self.mu = x/n
self.sigma = xxt/n - np.outer(self.mu,self.mu)
return self
class GaussianFixedMean(_GaussianBase, GibbsSampling, MaxLikelihood):
def __init__(self,mu=None,sigma=None,nu_0=None,lmbda_0=None):
self.sigma = sigma
self.mu = mu
self.nu_0 = nu_0
self.lmbda_0 = lmbda_0
if sigma is None and not any(_ is None for _ in (nu_0,lmbda_0)):
self.resample() # initialize from prior
@property
def hypparams(self):
return dict(nu_0=self.nu_0,lmbda_0=self.lmbda_0)
@property
def num_parameters(self):
D = len(self.mu)
return D*(D+1)/2
def _get_statistics(self,data):
n = getdatasize(data)
if n > 1e-4:
if isinstance(data,np.ndarray):
centered = data[gi(data)] - self.mu
sumsq = centered.T.dot(centered)
n = len(centered)
else:
sumsq = sum((d[gi(d)]-self.mu).T.dot(d[gi(d)]-self.mu) for d in data)
else:
sumsq = None
return n, sumsq
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
if neff > weps:
centered = data - self.mu
sumsq = centered.T.dot(weights[:,na]*centered)
else:
sumsq = None
else:
neff = sum(w.sum() for w in weights)
if neff > weps:
sumsq = sum((d-self.mu).T.dot(w[:,na]*(d-self.mu)) for w,d in zip(weights,data))
else:
sumsq = None
return neff, sumsq
def _posterior_hypparams(self,n,sumsq):
nu_0, lmbda_0 = self.nu_0, self.lmbda_0
if n > 1e-4:
nu_0 = nu_0 + n
sigma_n = self.lmbda_0 + sumsq
return sigma_n, nu_0
else:
return lmbda_0, nu_0
### Gibbs sampling
def resample(self, data=[]):
self.sigma = sample_invwishart(*self._posterior_hypparams(
*self._get_statistics(data)))
return self
### Max likelihood
def max_likelihood(self,data,weights=None):
D = getdatadimension(data)
if weights is None:
n, sumsq = self._get_statistics(data)
else:
n, sumsq = self._get_weighted_statistics(data,weights)
if n < D or (np.linalg.svd(sumsq,compute_uv=False) > 1e-6).sum() < D:
# broken!
self.sigma = np.eye(D)*1e-9
self.broken = True
else:
self.sigma = sumsq/n
return self
class GaussianFixedCov(_GaussianBase, GibbsSampling, MaxLikelihood):
# See Gelman's Bayesian Data Analysis notation around Eq. 3.18, p. 85
# in 2nd Edition. We replaced \Lambda_0 with sigma_0 since it is a prior
# *covariance* matrix rather than a precision matrix.
def __init__(self,mu=None,sigma=None,mu_0=None,sigma_0=None):
self.mu = mu
self.sigma = sigma
self.mu_0 = mu_0
self.sigma_0 = sigma_0
if mu is None and not any(_ is None for _ in (mu_0,sigma_0)):
self.resample()
@property
def hypparams(self):
return dict(mu_0=self.mu_0,sigma_0=self.sigma_0)
@property
def sigma_inv(self):
if not hasattr(self,'_sigma_inv'):
self._sigma_inv = np.linalg.inv(self.sigma)
return self._sigma_inv
@property
def sigma_inv_0(self):
if not hasattr(self,'_sigma_inv_0'):
self._sigma_inv_0 = np.linalg.inv(self.sigma_0)
return self._sigma_inv_0
@property
def num_parameters(self):
return len(self.mu)
def _get_statistics(self,data):
n = getdatasize(data)
if n > 0:
if isinstance(data,np.ndarray):
xbar = data.mean(0)
else:
xbar = sum(d.sum(0) for d in data) / n
else:
xbar = None
return n, xbar
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
if neff > weps:
xbar = weights.dot(data) / neff
else:
xbar = None
else:
neff = sum(w.sum() for w in weights)
if neff > weps:
xbar = sum(w.dot(d) for w,d in zip(weights,data)) / neff
else:
xbar = None
return neff, xbar
def _posterior_hypparams(self,n,xbar):
# It seems we should be working with lmbda and sigma inv (unless lmbda
# is a covariance, not a precision)
sigma_inv, mu_0, sigma_inv_0 = self.sigma_inv, self.mu_0, self.sigma_inv_0
if n > 0:
sigma_inv_n = n*sigma_inv + sigma_inv_0
mu_n = np.linalg.solve(
sigma_inv_n, sigma_inv_0.dot(mu_0) + n*sigma_inv.dot(xbar))
return mu_n, sigma_inv_n
else:
return mu_0, sigma_inv_0
### Gibbs sampling
def resample(self,data=[]):
mu_n, sigma_n_inv = self._posterior_hypparams(*self._get_statistics(data))
D = len(mu_n)
L = np.linalg.cholesky(sigma_n_inv)
self.mu = scipy.linalg.solve_triangular(L,np.random.normal(size=D),lower=True) \
+ mu_n
return self
### Max likelihood
def max_likelihood(self,data,weights=None):
if weights is None:
n, xbar = self._get_statistics(data)
else:
n, xbar = self._get_weighted_statistics(data,weights)
self.mu = xbar
return self
class GaussianFixed(_FixedParamsMixin, Gaussian):
def __init__(self,mu,sigma):
self.mu = mu
self.sigma = sigma
class GaussianNonConj(_GaussianBase, GibbsSampling):
def __init__(self,mu=None,sigma=None,
mu_0=None,mu_lmbda_0=None,nu_0=None,sigma_lmbda_0=None):
self._sigma_distn = GaussianFixedMean(mu=mu,
nu_0=nu_0,lmbda_0=sigma_lmbda_0,sigma=sigma)
self._mu_distn = GaussianFixedCov(sigma=self._sigma_distn.sigma,
mu_0=mu_0, sigma_0=mu_lmbda_0,mu=mu)
self._sigma_distn.mu = self._mu_distn.mu
@property
def hypparams(self):
d = self._mu_distn.hypparams
d.update(**self._sigma_distn.hypparams)
return d
def _get_mu(self):
return self._mu_distn.mu
def _set_mu(self,val):
self._mu_distn.mu = val
self._sigma_distn.mu = val
mu = property(_get_mu,_set_mu)
def _get_sigma(self):
return self._sigma_distn.sigma
def _set_sigma(self,val):
self._sigma_distn.sigma = val
self._mu_distn.sigma = val
sigma = property(_get_sigma,_set_sigma)
### Gibbs sampling
def resample(self,data=[],niter=1):
if getdatasize(data) == 0:
niter = 1
# TODO this is kinda dumb because it collects statistics over and over
# instead of updating them...
for itr in xrange(niter):
# resample mu
self._mu_distn.sigma = self._sigma_distn.sigma
self._mu_distn.resample(data)
# resample sigma
self._sigma_distn.mu = self._mu_distn.mu
self._sigma_distn.resample(data)
return self
# TODO collapsed
class DiagonalGaussian(_GaussianBase,GibbsSampling,MaxLikelihood,MeanField,Tempering):
'''
Product of normal-inverse-gamma priors over mu (mean vector) and sigmas
(vector of scalar variances).
The prior follows
sigmas ~ InvGamma(alphas_0,betas_0) iid
mu | sigma ~ N(mu_0,1/nus_0 * diag(sigmas))
It allows placing different prior hyperparameters on different components.
'''
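# Minimal usage sketch: a hypothetical 2-dimensional prior with scalar
# hyperparameters (broadcast to vectors by __init__ below); the values are
# illustrative only.
#
#     import numpy as np
#     d = DiagonalGaussian(mu_0=np.zeros(2), nus_0=0.1, alphas_0=1., betas_0=1.)
#     data = np.random.randn(100, 2)
#     d.resample(data)            # Gibbs draw of (mu, sigmas) given data
#     ll = d.log_likelihood(data) # per-datapoint log likelihoods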
def __init__(self,mu=None,sigmas=None,mu_0=None,nus_0=None,alphas_0=None,betas_0=None):
# all the s's refer to the fact that these are vectors of length
# len(mu_0) OR scalars
if mu_0 is not None:
D = mu_0.shape[0]
if nus_0 is not None and \
(isinstance(nus_0,int) or isinstance(nus_0,float)):
nus_0 = nus_0*np.ones(D)
if alphas_0 is not None and \
(isinstance(alphas_0,int) or isinstance(alphas_0,float)):
alphas_0 = alphas_0*np.ones(D)
if betas_0 is not None and \
(isinstance(betas_0,int) or isinstance(betas_0,float)):
betas_0 = betas_0*np.ones(D)
self.mu_0 = self.mf_mu = mu_0
self.nus_0 = self.mf_nus = nus_0
self.alphas_0 = self.mf_alphas = alphas_0
self.betas_0 = self.mf_betas = betas_0
self.mu = mu
self.sigmas = sigmas
assert self.mu is None or (isinstance(self.mu,np.ndarray) and not isinstance(self.mu,np.ma.MaskedArray))
assert self.sigmas is None or (isinstance(self.sigmas,np.ndarray) and not isinstance(self.sigmas,np.ma.MaskedArray))
if mu is sigmas is None \
and not any(_ is None for _ in (mu_0,nus_0,alphas_0,betas_0)):
self.resample() # initialize from prior
### the basics!
@property
def parameters(self):
return self.mu, self.sigmas
@parameters.setter
def parameters(self, mu_sigmas_tuple):
(mu,sigmas) = mu_sigmas_tuple
self.mu, self.sigmas = mu, sigmas
@property
def sigma(self):
return np.diag(self.sigmas)
@sigma.setter
def sigma(self,val):
val = np.array(val)
assert val.ndim in (1,2)
if val.ndim == 1:
self.sigmas = val
else:
self.sigmas = np.diag(val)
@property
def hypparams(self):
return dict(mu_0=self.mu_0,nus_0=self.nus_0,
alphas_0=self.alphas_0,betas_0=self.betas_0)
def rvs(self,size=None):
size = np.array(size,ndmin=1)
return np.sqrt(self.sigmas)*\
np.random.normal(size=np.concatenate((size,self.mu.shape))) + self.mu
def log_likelihood(self,x,temperature=1.):
mu, sigmas, D = self.mu, self.sigmas * temperature, self.mu.shape[0]
x = np.reshape(x,(-1,D))
Js = -1./(2*sigmas)
return (np.einsum('ij,ij,j->i',x,x,Js) - np.einsum('ij,j,j->i',x,2*mu,Js)) \
+ (mu**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum()
### posterior updating stuff
@property
def natural_hypparam(self):
return self._standard_to_natural(self.alphas_0,self.betas_0,self.mu_0,self.nus_0)
@natural_hypparam.setter
def natural_hypparam(self,natparam):
self.alphas_0, self.betas_0, self.mu_0, self.nus_0 = \
self._natural_to_standard(natparam)
def _standard_to_natural(self,alphas,betas,mu,nus):
return np.array([2*betas + nus * mu**2, nus*mu, nus, 2*alphas])
def _natural_to_standard(self,natparam):
nus = natparam[2]
mu = natparam[1] / nus
alphas = natparam[3]/2.
betas = (natparam[0] - nus*mu**2) / 2.
return alphas, betas, mu, nus
def _get_statistics(self,data):
if isinstance(data,np.ndarray) and data.shape[0] > 0:
data = data[gi(data)]
ns = np.repeat(*data.shape)
return np.array([
np.einsum('ni,ni->i',data,data),
np.einsum('ni->i',data),
ns,
ns,
])
else:
return sum((self._get_statistics(d) for d in data), self._empty_stats())
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
idx = ~np.isnan(data).any(1)
data = data[idx]
weights = weights[idx]
assert data.ndim == 2 and weights.ndim == 1 \
and data.shape[0] == weights.shape[0]
neff = np.repeat(weights.sum(),data.shape[1])
return np.array([weights.dot(data**2), weights.dot(data), neff, neff])
else:
return sum(
(self._get_weighted_statistics(d,w) for d, w in zip(data,weights)),
self._empty_stats())
def _empty_stats(self):
return np.zeros_like(self.natural_hypparam)
### Gibbs sampling
def resample(self,data=[],temperature=1.,stats=None):
stats = self._get_statistics(data) if stats is None else stats
alphas_n, betas_n, mu_n, nus_n = self._natural_to_standard(
self.natural_hypparam + stats / temperature)
D = mu_n.shape[0]
self.sigmas = 1/np.random.gamma(alphas_n,scale=1/betas_n)
self.mu = np.sqrt(self.sigmas/nus_n)*np.random.randn(D) + mu_n
assert not np.isnan(self.mu).any()
assert not np.isnan(self.sigmas).any()
# NOTE: next line is to use Gibbs sampling to initialize mean field
self.mf_mu = self.mu
assert self.sigmas.ndim == 1
return self
def copy_sample(self):
new = copy.copy(self)
new.mu = self.mu.copy()
new.sigmas = self.sigmas.copy()
return new
### max likelihood
def max_likelihood(self,data,weights=None):
# NOTE: statistics are stacked as [sum(x**2), sum(x), n, n]; see _get_statistics above
if weights is None:
sumsq, sumx, n, _ = self._get_statistics(data)
else:
sumsq, sumx, n, _ = self._get_weighted_statistics(data,weights)
self.mu = sumx/n
self.sigmas = sumsq/n - self.mu**2
return self
### Mean Field
@property
def mf_natural_hypparam(self):
return self._standard_to_natural(self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
@mf_natural_hypparam.setter
def mf_natural_hypparam(self,natparam):
self.mf_alphas, self.mf_betas, self.mf_mu, self.mf_nus = \
self._natural_to_standard(natparam)
# NOTE: this part is for plotting
self.mu = self.mf_mu
self.sigmas = np.where(self.mf_alphas > 1,self.mf_betas / (self.mf_alphas - 1),100000)
def meanfieldupdate(self,data,weights):
self.mf_natural_hypparam = \
self.natural_hypparam + self._get_weighted_statistics(data,weights)
def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
self.mf_natural_hypparam = \
(1-stepsize) * self.mf_natural_hypparam + stepsize * (
self.natural_hypparam
+ 1./minibatchfrac * self._get_weighted_statistics(data,weights))
def get_vlb(self):
natparam_diff = self.natural_hypparam - self.mf_natural_hypparam
expected_stats = self._expected_statistics(
self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
linear_term = sum(v1.dot(v2) for v1, v2 in zip(natparam_diff, expected_stats))
normalizer_term = \
self._log_Z(self.alphas_0,self.betas_0,self.mu_0,self.nus_0) \
- self._log_Z(self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
return linear_term - normalizer_term - len(self.mf_mu)/2. * np.log(2*np.pi)
def expected_log_likelihood(self,x):
x = np.atleast_2d(x).reshape((-1,len(self.mf_mu)))
a,b,c,d = self._expected_statistics(
self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
return (x**2).dot(a) + x.dot(b) + c.sum() + d.sum() \
- len(self.mf_mu)/2. * np.log(2*np.pi)
def _expected_statistics(self,alphas,betas,mu,nus):
return np.array([
-1./2 * alphas/betas,
mu * alphas/betas,
-1./2 * (1./nus + mu**2 * alphas/betas),
-1./2 * (np.log(betas) - special.digamma(alphas))])
def _log_Z(self,alphas,betas,mu,nus):
return (special.gammaln(alphas) - alphas*np.log(betas) - 1./2*np.log(nus)).sum()
# TODO meanfield
class DiagonalGaussianNonconjNIG(_GaussianBase,GibbsSampling):
'''
Product of normal priors over mu and product of gamma priors over sigmas.
Note that while the conjugate prior in DiagonalGaussian is of the form
p(mu,sigmas), this prior is of the form p(mu)p(sigmas). Therefore its
resample() update has to perform inner iterations.
The prior follows
mu ~ N(mu_0,diag(sigmas_0))
sigmas ~ InvGamma(alpha_0,beta_0) iid
'''
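# Sketch of the inner Gibbs sweep performed by resample() below (per
# coordinate, with n observations, sum y and sum-of-squares ysq; notation
# only, not code):
#     mu | sigmas, data ~ N(mu_n, sigmas_n), where
#         sigmas_n = 1/(1/sigmas_0 + n/sigmas)
#         mu_n     = sigmas_n * (mu_0/sigmas_0 + y/sigmas)
#     sigmas | mu, data ~ InvGamma(alpha_0 + n/2, beta_0 + (ysq + n*mu**2 - 2*mu*y)/2)
# Alternating these conditionals for self.niter iterations approximates a
# draw from the joint posterior.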
def __init__(self,mu=None,sigmas=None,mu_0=None,sigmas_0=None,alpha_0=None,beta_0=None,
niter=20):
self.mu_0, self.sigmas_0 = mu_0, sigmas_0
self.alpha_0, self.beta_0 = alpha_0, beta_0
self.niter = niter
if None in (mu,sigmas):
self.resample()
else:
self.mu, self.sigmas = mu, sigmas
@property
def hypparams(self):
return dict(mu_0=self.mu_0,sigmas_0=self.sigmas_0,alpha_0=self.alpha_0,beta_0=self.beta_0)
# TODO next three methods are copied from DiagonalGaussian, factor them out
@property
def sigma(self):
return np.diag(self.sigmas)
def rvs(self,size=None):
size = np.array(size,ndmin=1)
return np.sqrt(self.sigmas)*\
np.random.normal(size=np.concatenate((size,self.mu.shape))) + self.mu
def log_likelihood(self,x):
mu, sigmas, D = self.mu, self.sigmas, self.mu.shape[0]
x = np.reshape(x,(-1,D))
Js = -1./(2*sigmas)
return (np.einsum('ij,ij,j->i',x,x,Js) - np.einsum('ij,j,j->i',x,2*mu,Js)) \
+ (mu**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum()
def resample(self,data=[]):
n, y, ysq = self._get_statistics(data)
if n == 0:
self.mu = np.sqrt(self.sigmas_0) * np.random.randn(self.mu_0.shape[0]) + self.mu_0
self.sigmas = 1./np.random.gamma(self.alpha_0,scale=1./self.beta_0)
else:
for itr in xrange(self.niter):
sigmas_n = 1./(1./self.sigmas_0 + n / self.sigmas)
mu_n = (self.mu_0 / self.sigmas_0 + y / self.sigmas) * sigmas_n
self.mu = np.sqrt(sigmas_n) * np.random.randn(mu_n.shape[0]) + mu_n
alphas_n = self.alpha_0 + 1./2*n
betas_n = self.beta_0 + 1./2*(ysq + n*self.mu**2 - 2*self.mu*y)
self.sigmas = 1./np.random.gamma(alphas_n,scale=1./betas_n)
return self
def _get_statistics(self,data):
# TODO dont forget to handle nans
assert isinstance(data,(list,np.ndarray)) and not isinstance(data,np.ma.MaskedArray)
if isinstance(data,np.ndarray):
data = data[gi(data)]
n = data.shape[0]
y = np.einsum('ni->i',data)
ysq = np.einsum('ni,ni->i',data,data)
return np.array([n,y,ysq],dtype=np.object)
else:
return sum((self._get_statistics(d) for d in data),self._empty_stats)
@property
def _empty_stats(self):
return np.array([0.,np.zeros_like(self.mu_0),np.zeros_like(self.mu_0)],
dtype=np.object)
# TODO collapsed, meanfield, max_likelihood
class IsotropicGaussian(GibbsSampling):
'''
Normal-Inverse-Gamma prior over mu (mean vector) and sigma (scalar
variance). Essentially, all coordinates of all observations inform the
variance.
The prior follows
sigma ~ InvGamma(alpha_0,beta_0)
mu | sigma ~ N(mu_0,sigma/nu_0 * I)
'''
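# Minimal usage sketch with hypothetical hyperparameter values; the single
# scalar variance is shared by every coordinate of every observation.
#
#     import numpy as np
#     iso = IsotropicGaussian(mu_0=np.zeros(3), nu_0=0.1, alpha_0=1., beta_0=1.)
#     iso.resample(np.random.randn(50, 3))  # Gibbs draw of (mu, sigma)
#     samples = iso.rvs(size=(10,))         # 10 new draws from the fitted Gaussian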
def __init__(self,mu=None,sigma=None,mu_0=None,nu_0=None,alpha_0=None,beta_0=None):
self.mu = mu
self.sigma = sigma
self.mu_0 = mu_0
self.nu_0 = nu_0
self.alpha_0 = alpha_0
self.beta_0 = beta_0
if mu is sigma is None and not any(_ is None for _ in (mu_0,nu_0,alpha_0,beta_0)):
self.resample() # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,nu_0=self.nu_0,alpha_0=self.alpha_0,beta_0=self.beta_0)
def rvs(self,size=None):
return np.sqrt(self.sigma)*np.random.normal(size=tuple(size)+self.mu.shape) + self.mu
def log_likelihood(self,x):
mu, sigma, D = self.mu, self.sigma, self.mu.shape[0]
x = np.reshape(x,(-1,D))
return (-0.5*((x-mu)**2).sum(1)/sigma - D*np.log(np.sqrt(2*np.pi*sigma)))
def _posterior_hypparams(self,n,xbar,sumsq):
mu_0, nu_0, alpha_0, beta_0 = self.mu_0, self.nu_0, self.alpha_0, self.beta_0
D = mu_0.shape[0]
if n > 0:
nu_n = D*n + nu_0
alpha_n = alpha_0 + D*n/2
beta_n = beta_0 + 1/2*sumsq + (n*D*nu_0)/(n*D+nu_0) * 1/2 * ((xbar - mu_0)**2).sum()
mu_n = (n*xbar + nu_0*mu_0)/(n+nu_0)
return mu_n, nu_n, alpha_n, beta_n
else:
return mu_0, nu_0, alpha_0, beta_0
### Gibbs sampling
def resample(self,data=[]):
mu_n, nu_n, alpha_n, beta_n = self._posterior_hypparams(
*self._get_statistics(data, D=self.mu_0.shape[0]))
D = mu_n.shape[0]
self.sigma = 1/np.random.gamma(alpha_n,scale=1/beta_n)
self.mu = np.sqrt(self.sigma/nu_n)*np.random.randn(D)+mu_n
return self
def _get_statistics(self,data, D=None):
n = getdatasize(data)
if n > 0:
D = D if D else getdatadimension(data)
if isinstance(data,np.ndarray):
assert (data.ndim == 1 and data.shape == (D,)) \
or (data.ndim == 2 and data.shape[1] == D)
data = np.reshape(data,(-1,D))
xbar = data.mean(0)
sumsq = ((data-xbar)**2).sum()
else:
xbar = sum(np.reshape(d,(-1,D)).sum(0) for d in data) / n
sumsq = sum(((np.reshape(d,(-1,D)) - xbar)**2).sum() for d in data)
else:
xbar, sumsq = None, None
return n, xbar, sumsq
class _ScalarGaussianBase(object):
@property
def params(self):
return dict(mu=self.mu,sigmasq=self.sigmasq)
def rvs(self,size=None):
return np.sqrt(self.sigmasq)*np.random.normal(size=size)+self.mu
def log_likelihood(self,x):
x = np.reshape(x,(-1,1))
return (-0.5*(x-self.mu)**2/self.sigmasq - np.log(np.sqrt(2*np.pi*self.sigmasq))).ravel()
def __repr__(self):
return self.__class__.__name__ + '(mu=%f,sigmasq=%f)' % (self.mu,self.sigmasq)
def plot(self,data=None,indices=None,color='b',plot_params=True,label=None):
import matplotlib.pyplot as plt
data = np.concatenate(data) if data is not None else None
indices = np.concatenate(indices) if indices is not None else None
if data is not None:
assert indices is not None
plt.plot(indices,data,color=color,marker='x',linestyle='')
if plot_params:
assert indices is not None
if len(indices) > 1:
from util.general import rle
vals, lens = rle(np.diff(indices))
starts = np.concatenate(((0,),lens.cumsum()[:-1]))
for start, blocklen in zip(starts[vals == 1], lens[vals == 1]):
plt.plot(indices[start:start+blocklen],
np.repeat(self.mu,blocklen),color=color,linestyle='--')
else:
plt.plot(indices,[self.mu],color=color,marker='+')
### mostly shared statistics gathering
def _get_statistics(self,data):
n = getdatasize(data)
if n > 0:
if isinstance(data,np.ndarray):
ybar = data.mean()
centered = data.ravel() - ybar
sumsqc = centered.dot(centered)
elif isinstance(data,list):
ybar = sum(d.sum() for d in data)/n
sumsqc = sum((d.ravel()-ybar).dot(d.ravel()-ybar) for d in data)
else:
ybar = data
sumsqc = 0
else:
ybar = None
sumsqc = None
return n, ybar, sumsqc
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
if neff > weps:
ybar = weights.dot(data.ravel()) / neff
centered = data.ravel() - ybar
sumsqc = centered.dot(weights*centered)
else:
ybar = None
sumsqc = None
elif isinstance(data,list):
neff = sum(w.sum() for w in weights)
if neff > weps:
ybar = sum(w.dot(d.ravel()) for d,w in zip(data,weights)) / neff
sumsqc = sum((d.ravel()-ybar).dot(w*(d.ravel()-ybar))
for d,w in zip(data,weights))
else:
ybar = None
sumsqc = None
else:
ybar = data
sumsqc = 0
return neff, ybar, sumsqc
### max likelihood
def max_likelihood(self,data,weights=None):
if weights is None:
n, ybar, sumsqc = self._get_statistics(data)
else:
n, ybar, sumsqc = self._get_weighted_statistics(data,weights)
if sumsqc > 0:
self.mu = ybar
self.sigmasq = sumsqc/n
else:
self.broken = True
self.mu = 999999999.
self.sigmasq = 1.
return self
class ScalarGaussianNIX(_ScalarGaussianBase, GibbsSampling, Collapsed):
'''
Conjugate Normal-(Scaled-)Inverse-ChiSquared prior. (Another parameterization is the
Normal-Inverse-Gamma.)
'''
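# Worked posterior update, matching _posterior_hypparams() below: with n
# observations, sample mean ybar and centered sum of squares sumsqc,
#     kappa_n   = kappa_0 + n
#     mu_n      = (kappa_0*mu_0 + n*ybar) / kappa_n
#     nu_n      = nu_0 + n
#     sigmasq_n = (nu_0*sigmasq_0 + sumsqc
#                  + kappa_0*n/(kappa_0 + n)*(ybar - mu_0)**2) / nu_n
# so resample() draws sigmasq = nu_n*sigmasq_n / chi2(nu_n) and then
# mu ~ N(mu_n, sigmasq/kappa_n).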
def __init__(self,mu=None,sigmasq=None,mu_0=None,kappa_0=None,sigmasq_0=None,nu_0=None):
self.mu = mu
self.sigmasq = sigmasq
self.mu_0 = mu_0
self.kappa_0 = kappa_0
self.sigmasq_0 = sigmasq_0
self.nu_0 = nu_0
if mu is sigmasq is None \
and not any(_ is None for _ in (mu_0,kappa_0,sigmasq_0,nu_0)):
self.resample() # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,kappa_0=self.kappa_0,
sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)
def _posterior_hypparams(self,n,ybar,sumsqc):
mu_0, kappa_0, sigmasq_0, nu_0 = self.mu_0, self.kappa_0, self.sigmasq_0, self.nu_0
if n > 0:
kappa_n = kappa_0 + n
mu_n = (kappa_0 * mu_0 + n * ybar) / kappa_n
nu_n = nu_0 + n
sigmasq_n = 1/nu_n * (nu_0 * sigmasq_0 + sumsqc + kappa_0 * n / (kappa_0 + n) * (ybar - mu_0)**2)
return mu_n, kappa_n, sigmasq_n, nu_n
else:
return mu_0, kappa_0, sigmasq_0, nu_0
### Gibbs sampling
def resample(self,data=[]):
mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))
self.sigmasq = nu_n * sigmasq_n / np.random.chisquare(nu_n)
self.mu = np.sqrt(self.sigmasq / kappa_n) * np.random.randn() + mu_n
return self
### Collapsed
def log_marginal_likelihood(self,data):
n = getdatasize(data)
kappa_0, sigmasq_0, nu_0 = self.kappa_0, self.sigmasq_0, self.nu_0
mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))
return special.gammaln(nu_n/2) - special.gammaln(nu_0/2) \
+ 0.5*(np.log(kappa_0) - np.log(kappa_n)
+ nu_0 * (np.log(nu_0) + np.log(sigmasq_0))
- nu_n * (np.log(nu_n) + np.log(sigmasq_n))
- n*np.log(np.pi))
def log_predictive_single(self,y,olddata):
# mostly for testing or speed
mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(olddata))
return stats.t.logpdf(y,nu_n,loc=mu_n,scale=np.sqrt((1+kappa_n)*sigmasq_n/kappa_n))
class ScalarGaussianNonconjNIX(_ScalarGaussianBase, GibbsSampling):
'''
Non-conjugate separate priors on mean and variance parameters, via
mu ~ Normal(mu_0,tausq_0)
sigmasq ~ (Scaled-)Inverse-ChiSquared(sigmasq_0,nu_0)
'''
def __init__(self,mu=None,sigmasq=None,mu_0=None,tausq_0=None,sigmasq_0=None,nu_0=None,
niter=1):
self.mu, self.sigmasq = mu, sigmasq
self.mu_0, self.tausq_0 = mu_0, tausq_0
self.sigmasq_0, self.nu_0 = sigmasq_0, nu_0
self.niter = niter
if mu is sigmasq is None \
and not any(_ is None for _ in (mu_0, tausq_0, sigmasq_0, nu_0)):
self.resample() # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,tausq_0=self.tausq_0,
sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)
def resample(self,data=[],niter=None):
n = getdatasize(data)
niter = self.niter if niter is None else niter
if n > 0:
data = flattendata(data)
datasum = data[gi(data)].sum()
datasqsum = (data[gi(data)]**2).sum()
nu_n = self.nu_0 + n
for itr in range(niter):
# resample mean
tausq_n = 1/(1/self.tausq_0 + n/self.sigmasq)
mu_n = tausq_n*(self.mu_0/self.tausq_0 + datasum/self.sigmasq)
self.mu = np.sqrt(tausq_n)*np.random.normal() + mu_n
# resample variance
sigmasq_n = (self.nu_0*self.sigmasq_0 + (datasqsum + n*self.mu**2-2*datasum*self.mu))/nu_n
self.sigmasq = sigmasq_n*nu_n/np.random.chisquare(nu_n)
else:
self.mu = np.sqrt(self.tausq_0) * np.random.normal() + self.mu_0
self.sigmasq = self.sigmasq_0*self.nu_0/np.random.chisquare(self.nu_0)
return self
class ScalarGaussianNonconjNIG(_ScalarGaussianBase, MeanField, MeanFieldSVI):
# NOTE: this is like ScalarGaussianNonconjNIX except the prior is expressed
# in natural coordinates
def __init__(self,h_0,J_0,alpha_0,beta_0,
mu=None,sigmasq=None,
h_mf=None,J_mf=None,alpha_mf=None,beta_mf=None,niter=1):
self.h_0, self.J_0 = h_0, J_0
self.alpha_0, self.beta_0 = alpha_0, beta_0
self.h_mf = h_mf if h_mf is not None else J_0 * np.random.normal(h_0/J_0,1./np.sqrt(J_0))
self.J_mf = J_mf if J_mf is not None else J_0
self.alpha_mf = alpha_mf if alpha_mf is not None else alpha_0
self.beta_mf = beta_mf if beta_mf is not None else beta_0
self.niter = niter
self.mu = mu if mu is not None else np.random.normal(h_0/J_0,1./np.sqrt(J_0))
self.sigmasq = sigmasq if sigmasq is not None else 1./np.random.gamma(alpha_0,1./beta_0)
@property
def hypparams(self):
return dict(h_0=self.h_0,J_0=self.J_0,alpha_0=self.alpha_0,beta_0=self.beta_0)
@property
def _E_mu(self):
# E[mu], E[mu**2]
return self.h_mf / self.J_mf, 1./self.J_mf + (self.h_mf / self.J_mf)**2
@property
def _E_sigmasq(self):
# E[1/sigmasq], E[ln sigmasq]
return self.alpha_mf / self.beta_mf, \
np.log(self.beta_mf) - special.digamma(self.alpha_mf)
@property
def natural_hypparam(self):
return np.array([self.alpha_0,self.beta_0,self.h_0,self.J_0])
@natural_hypparam.setter
def natural_hypparam(self,natural_hypparam):
self.alpha_0, self.beta_0, self.h_0, self.J_0 = natural_hypparam
@property
def mf_natural_hypparam(self):
return np.array([self.alpha_mf,self.beta_mf,self.h_mf,self.J_mf])
@mf_natural_hypparam.setter
def mf_natural_hypparam(self,mf_natural_hypparam):
self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf = mf_natural_hypparam
# set point estimates of (mu, sigmasq) for plotting and stuff
self.mu, self.sigmasq = self.h_mf / self.J_mf, self.beta_mf / (self.alpha_mf-1)
def _resample_from_mf(self):
self.mu, self.sigmasq = np.random.normal(self.h_mf/self.J_mf,np.sqrt(1./self.J_mf)), \
np.random.gamma(self.alpha_mf,1./self.beta_mf)
return self
def expected_log_likelihood(self,x):
(Emu, Emu2), (Esigmasqinv, Elnsigmasq) = self._E_mu, self._E_sigmasq
return -1./2 * Esigmasqinv * (x**2 + Emu2 - 2*x*Emu) \
- 1./2*Elnsigmasq - 1./2*np.log(2*np.pi)
def get_vlb(self):
# E[ln p(mu) / q(mu)] part
h_0, J_0, J_mf = self.h_0, self.J_0, self.J_mf
Emu, Emu2 = self._E_mu
p_mu_avgengy = -1./2*J_0*Emu2 + h_0*Emu \
- 1./2*(h_0**2/J_0) + 1./2*np.log(J_0) - 1./2*np.log(2*np.pi)
q_mu_entropy = 1./2*np.log(2*np.pi*np.e/J_mf)
# E[ln p(sigmasq) / q(sigmasq)] part
alpha_0, beta_0, alpha_mf, beta_mf = \
self.alpha_0, self.beta_0, self.alpha_mf, self.beta_mf
(Esigmasqinv, Elnsigmasq) = self._E_sigmasq
p_sigmasq_avgengy = (-alpha_0-1)*Elnsigmasq + (-beta_0)*Esigmasqinv \
- (special.gammaln(alpha_0) - alpha_0*np.log(beta_0))
q_sigmasq_entropy = alpha_mf + np.log(beta_mf) + special.gammaln(alpha_mf) \
- (1+alpha_mf)*special.digamma(alpha_mf)
return p_mu_avgengy + q_mu_entropy + p_sigmasq_avgengy + q_sigmasq_entropy
def meanfield_sgdstep(self,data,weights,minibatchfrac,stepsize):
# like meanfieldupdate except we step the factors simultaneously
# NOTE: unlike the fully conjugate case, there are interaction terms, so
# we work on the destructured pieces
neff, y, ysq = self._get_weighted_statistics(data,weights)
Emu, _ = self._E_mu
Esigmasqinv, _ = self._E_sigmasq
# form new natural hyperparameters as if doing a batch update
alpha_new = self.alpha_0 + 1./minibatchfrac * 1./2*neff
beta_new = self.beta_0 + 1./minibatchfrac * 1./2*(ysq + neff*Emu**2 - 2*Emu*y)
h_new = self.h_0 + 1./minibatchfrac * Esigmasqinv * y
J_new = self.J_0 + 1./minibatchfrac * Esigmasqinv * neff
# take a step
self.alpha_mf = (1-stepsize)*self.alpha_mf + stepsize*alpha_new
self.beta_mf = (1-stepsize)*self.beta_mf + stepsize*beta_new
self.h_mf = (1-stepsize)*self.h_mf + stepsize*h_new
self.J_mf = (1-stepsize)*self.J_mf + stepsize*J_new
# calling this setter will set point estimates for (mu,sigmasq) for
# plotting and sampling and stuff
self.mf_natural_hypparam = (self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf)
return self
def meanfieldupdate(self,data,weights,niter=None):
niter = niter if niter is not None else self.niter
neff, y, ysq = self._get_weighted_statistics(data,weights)
for itr in xrange(niter):
# update q(sigmasq)
Emu, _ = self._E_mu
self.alpha_mf = self.alpha_0 + 1./2*neff
self.beta_mf = self.beta_0 + 1./2*(ysq + neff*Emu**2 - 2*Emu*y)
# update q(mu)
Esigmasqinv, _ = self._E_sigmasq
self.h_mf = self.h_0 + Esigmasqinv * y
self.J_mf = self.J_0 + Esigmasqinv * neff
# calling this setter will set point estimates for (mu,sigmasq) for
# plotting and sampling and stuff
self.mf_natural_hypparam = \
(self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf)
return self
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
y = weights.dot(data)
ysq = weights.dot(data**2)
else:
return sum(
self._get_weighted_statistics(d,w) for d,w in zip(data,weights))
return np.array([neff,y,ysq])
class ScalarGaussianFixedvar(_ScalarGaussianBase, GibbsSampling):
'''
Conjugate normal prior on mean.
'''
def __init__(self,mu=None,sigmasq=None,mu_0=None,tausq_0=None):
self.mu = mu
self.sigmasq = sigmasq
self.mu_0 = mu_0
self.tausq_0 = tausq_0
if mu is None and not any(_ is None for _ in (mu_0,tausq_0)):
self.resample() # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,tausq_0=self.tausq_0)
def _posterior_hypparams(self,n,xbar):
mu_0, tausq_0 = self.mu_0, self.tausq_0
sigmasq = self.sigmasq
if n > 0:
tausq_n = 1/(1/tausq_0 + n/sigmasq)
mu_n = (mu_0/tausq_0 + n*xbar/sigmasq)*tausq_n
return mu_n, tausq_n
else:
return mu_0, tausq_0
def resample(self,data=[]):
mu_n, tausq_n = self._posterior_hypparams(*self._get_statistics(data))
self.mu = np.sqrt(tausq_n)*np.random.randn()+mu_n
return self
def _get_statistics(self,data):
n = getdatasize(data)
if n > 0:
if isinstance(data,np.ndarray):
xbar = data.mean()
else:
xbar = sum(d.sum() for d in data)/n
else:
xbar = None
return n, xbar
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
else:
neff = sum(w.sum() for w in weights)
if neff > weps:
if isinstance(data,np.ndarray):
xbar = data.dot(weights) / neff
else:
xbar = sum(w.dot(d) for d,w in zip(data,weights)) / neff
else:
xbar = None
return neff, xbar
def max_likelihood(self,data,weights=None):
if weights is None:
_, xbar = self._get_statistics(data)
else:
_, xbar = self._get_weighted_statistics(data,weights)
self.mu = xbar
|
michaelpacer/pybasicbayes
|
pybasicbayes/distributions/gaussian.py
|
Python
|
mit
| 51,874
|
[
"Gaussian"
] |
94a0bb57b259a57e1abe887adf1087f58abb49e3e92a8e045209a27c4744d2ce
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska and the SALib team
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
import math
class fast(_algorithm):
'''
Fourier Amplitude Sensitivity Test (FAST)
This class holds the Fourier Amplitude Sensitivity Test (FAST) based on Cukier et al. (1973) and Saltelli et al. (1999):
Cukier, R. I., Fortuin, C. M., Shuler, K. E., Petschek, A. G. and Schaibly, J. H.: Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients. I Theory, J. Chem. Phys., 59(8), 3873–3878, 1973.
Saltelli, A., Tarantola, S. and Chan, K. P.-S.: A Quantitative Model-Independent Method for Global Sensitivity Analysis of Model Output, Technometrics, 41(1), 39–56, doi:10.1080/00401706.1999.10485594, 1999.
The presented code is based on SALib
Copyright (C) 2013-2015 Jon Herman and others. Licensed under the GNU Lesser General Public License.
The Sensitivity Analysis Library is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
The Sensitivity Analysis Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with the Sensitivity Analysis Library. If not, see http://www.gnu.org/licenses/.
'''
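# Minimal usage sketch, assuming a user-supplied spot_setup class following
# the spotpy conventions documented in __init__ below; names and the
# repetition count are illustrative only.
#
#     import spotpy
#     sampler = spotpy.algorithms.fast(spot_setup(), dbname='FAST_run', dbformat='csv')
#     sampler.sample(1000)  # samples, then prints first/total-order indices via analyze()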
def __init__(self, *args, **kwargs):
'''
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return a list of simulation results (same length as the evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objective function value for a given list of model
simulations and observations.
evaluation: function
Should return the true values as returned by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast, suited for short sampling times; no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
* seq: Sequential sampling (default): normal iterations on one core of your CPU.
* mpi: Message Passing Interface: parallel computing on cluster PCs (recommended for Unix OS).
save_sim: boolean
* True: Simulation results will be saved
* False: Simulation results will not be saved
'''
kwargs['algorithm_name'] = 'Fourier Amplitude Sensitivity Test (FAST)'
super(fast, self).__init__(*args, **kwargs)
def scale_samples(self, params, bounds):
'''
Rescales samples in 0-to-1 range to arbitrary bounds.
Arguments:
bounds - list of lists of dimensions num_params-by-2
params - numpy array of dimensions num_params-by-N,
where N is the number of samples
'''
# Check bounds are legal (upper bound is greater than lower bound)
b = np.array(bounds)
lower_bounds = b[:, 0]
upper_bounds = b[:, 1]
if np.any(lower_bounds >= upper_bounds):
raise ValueError("Bounds are not legal")
# This scales the samples in-place, by using the optional output
# argument for the numpy ufunctions
# The calculation is equivalent to:
# sample * (upper_bound - lower_bound) + lower_bound
np.add(np.multiply(params,
(upper_bounds - lower_bounds),
out=params),
lower_bounds,
out=params)
def matrix(self, bounds, N, M=4):
D = len(bounds)
omega = np.empty([D])
omega[0] = math.floor((N - 1) / (2 * M))
m = math.floor(omega[0] / (2 * M))
if m >= (D - 1):
omega[1:] = np.floor(np.linspace(1, m, D - 1))
else:
omega[1:] = np.arange(D - 1) % m + 1
# Discretization of the frequency space, s
s = (2 * math.pi / N) * np.arange(N)
#s = math.pi / 2.0 * (2 * np.arange(1,N+1) - N-1) / N
# Transformation to get points in the X space
X = np.empty([N * D, D])
omega2 = np.empty([D])
for i in range(D):
omega2[i] = omega[0]
idx = list(range(i)) + list(range(i + 1, D))
omega2[idx] = omega[1:]
l = range(i * N, (i + 1) * N)
# random phase shift on [0, 2pi) following Saltelli et al.
# Technometrics 1999
phi = 2 * math.pi * np.random.rand()
for j in range(D):
g = 0.5 + (1 / math.pi) * \
np.arcsin(np.sin(omega2[j] * s + phi))
X[l, j] = g
self.scale_samples(X, bounds)
return X
def analyze(self, problem, Y, D, parnames, M=4, print_to_console=False):
if len(Y.shape) > 1:
Y = Y.flatten()
print(Y.size)
if Y.size % (D) == 0:
N = int(Y.size / D)
elif Y.size > D:
N = int(Y.size / D)
rest = Y.size - N*D
print("""
We cannot use """ + str(rest) + """ of the """ + str(Y.size) + """ samples that were generated.
""")
else:
print("""
Error: Number of samples in model output file must be a multiple of D,
where D is the number of parameters in your parameter file.
""")
exit()
# Recreate the vector omega used in the sampling
omega = np.empty([D])
omega[0] = math.floor((N - 1) / (2 * M))
m = math.floor(omega[0] / (2 * M))
if m >= (D - 1):
omega[1:] = np.floor(np.linspace(1, m, D - 1))
else:
omega[1:] = np.arange(D - 1) % m + 1
# Calculate and Output the First and Total Order Values
if print_to_console:
print("Parameter First Total")
Si = dict((k, [None] * D) for k in ['S1', 'ST'])
for i in range(D):
l = np.arange(i * N, (i + 1) * N)
Si['S1'][i] = self.compute_first_order(Y[l], N, M, omega[0])
Si['ST'][i] = self.compute_total_order(Y[l], N, omega[0])
if print_to_console:
print("%s %f %f" %
(parnames[i], Si['S1'][i], Si['ST'][i]))
return Si
def compute_first_order(self, outputs, N, M, omega):
f = np.fft.fft(outputs)
Sp = np.power(np.absolute(f[np.arange(1, int(N / 2))]) / N, 2)
V = 2 * np.sum(Sp)
D1 = 2 * np.sum(Sp[np.arange(1, M + 1) * int(omega) - 1])
return D1 / V
def compute_total_order(self, outputs, N, omega):
f = np.fft.fft(outputs)
Sp = np.power(np.absolute(f[np.arange(1, int((N + 1) / 2))]) / N, 2)
V = 2 * np.sum(Sp)
Dt = 2 * sum(Sp[np.arange(int(omega / 2))])
return (1 - Dt / V)
def sample(self, repetitions, M=4):
"""
Samples from the FAST algorithm.
Input
----------
repetitions: int
Maximum number of runs.
"""
self.set_repetiton(repetitions)
print('Starting the FAST algorithm with ' + str(repetitions) + ' repetitions...')
print('Creating FAST Matrix')
# Get the names of the parameters to analyse
names = self.parameter()['name']
# Get the minimum and maximum value for each parameter from the
# distribution
parmin, parmax = self.parameter()['minbound'], self.parameter()[
'maxbound']
# Create a matrix to store the parameter sets
N = int(math.ceil(float(repetitions) / float(len(parmin))))
bounds = []
for i in range(len(parmin)):
bounds.append([parmin[i], parmax[i]])
Matrix = self.matrix(bounds, N, M=M)
lastbackup=0
if self.breakpoint == 'read' or self.breakpoint == 'readandwrite':
data_frombreak = self.read_breakdata(self.dbname)
rep = data_frombreak[0]
Matrix = data_frombreak[1]
param_generator = (
(rep, Matrix[rep]) for rep in range(len(Matrix)))
for rep, randompar, simulations in self.repeat(param_generator):
# Calculate the objective function
self.postprocessing(rep, randompar, simulations)
if self.breakpoint == 'write' or self.breakpoint == 'readandwrite':
if rep >= lastbackup+self.backup_every_rep:
work = (rep, Matrix[rep:])
self.write_breakdata(self.dbname, work)
lastbackup = rep
self.final_call()
try:
data = self.datawriter.getdata()
# this is likely to crash if database does not assign name 'like1'
Si = self.analyze(
bounds, data['like1'], len(bounds), names, M=M, print_to_console=True)
except AttributeError: # Happens if no database was assigned
pass
|
thouska/spotpy
|
spotpy/algorithms/fast.py
|
Python
|
mit
| 10,004
|
[
"Gaussian"
] |
2c01cdc615ddf3b730d659b11dd5a70c5fccffbf4fd889ab9001708082b3790f
|
"""
Helper functions for the course complete event that was originally included with the Badging MVP.
"""
import hashlib
import logging
import six
from django.urls import reverse
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from lms.djangoapps.badges.models import BadgeAssertion, BadgeClass, CourseCompleteImageConfiguration
from lms.djangoapps.badges.utils import requires_badges_enabled, site_prefix
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=import-error, wrong-import-order
LOGGER = logging.getLogger(__name__)
# NOTE: As these functions are carry-overs from the initial badging implementation, they are used in
# migrations. Please check the badge migrations when changing any of these functions.
def course_slug(course_key, mode):
"""
Legacy: Not to be used as a model for constructing badge slugs. Included for compatibility with the original badge
type, awarded on course completion.
Slug ought to be deterministic and limited in size so it's not too big for Badgr.
Badgr's max slug length is 255.
"""
# Seven digits should be enough to realistically avoid collisions. That's what git services use.
digest = hashlib.sha256(
u"{}{}".format(six.text_type(course_key), six.text_type(mode)).encode('utf-8')
).hexdigest()[:7]
base_slug = slugify(six.text_type(course_key) + u'_{}_'.format(mode))[:248]
return base_slug + digest
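# Illustrative shape of the result (the exact slugified prefix depends on the
# course key string): a slugified "<course_key>_<mode>_" prefix truncated to
# 248 characters, followed by a 7-hex-character sha256 digest, e.g. roughly
#     'course-v1edx-demoxdemo_honor_' + '1a2b3c4'
# which keeps the slug deterministic and under Badgr's 255-character limit.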
def badge_description(course, mode):
"""
Returns a description for the earned badge.
"""
if course.end:
return _(u'Completed the course "{course_name}" ({course_mode}, {start_date} - {end_date})').format(
start_date=course.start.date(),
end_date=course.end.date(),
course_name=course.display_name,
course_mode=mode,
)
else:
return _(u'Completed the course "{course_name}" ({course_mode})').format(
course_name=course.display_name,
course_mode=mode,
)
def evidence_url(user_id, course_key):
"""
Generates a URL to the user's Certificate HTML view, along with a GET variable that will signal the evidence visit
event.
"""
course_id = six.text_type(course_key)
# avoid circular import problems
from lms.djangoapps.certificates.models import GeneratedCertificate
cert = GeneratedCertificate.eligible_certificates.get(user__id=int(user_id), course_id=course_id)
return site_prefix() + reverse(
'certificates:render_cert_by_uuid', kwargs={'certificate_uuid': cert.verify_uuid}) + '?evidence_visit=1'
def criteria(course_key):
"""
Constructs the 'criteria' URL from the course about page.
"""
about_path = reverse('about_course', kwargs={'course_id': six.text_type(course_key)})
return u'{}{}'.format(site_prefix(), about_path)
def get_completion_badge(course_id, user):
"""
Given a course key and a user, find the user's enrollment mode
and get the Course Completion badge.
"""
from common.djangoapps.student.models import CourseEnrollment
badge_classes = CourseEnrollment.objects.filter(
user=user, course_id=course_id
).order_by('-is_active')
if not badge_classes:
return None
mode = badge_classes[0].mode
course = modulestore().get_course(course_id)
if not course.issue_badges:
return None
return BadgeClass.get_badge_class(
slug=course_slug(course_id, mode),
issuing_component='',
criteria=criteria(course_id),
description=badge_description(course, mode),
course_id=course_id,
mode=mode,
display_name=course.display_name,
image_file_handle=CourseCompleteImageConfiguration.image_for_mode(mode)
)
@requires_badges_enabled
def course_badge_check(user, course_key):
"""
Takes a user and a course key, and checks to see if a badge exists for this course, creating
it if not, should conditions be right.
"""
if not modulestore().get_course(course_key).issue_badges:
LOGGER.info("Course is not configured to issue badges.")
return
badge_class = get_completion_badge(course_key, user)
if not badge_class:
# We're not configured to make a badge for this course mode.
return
if BadgeAssertion.objects.filter(user=user, badge_class=badge_class):
LOGGER.info("Completion badge already exists for this user on this course.")
# Badge already exists. Skip.
return
evidence = evidence_url(user.id, course_key)
badge_class.award(user, evidence_url=evidence)
|
stvstnfrd/edx-platform
|
lms/djangoapps/badges/events/course_complete.py
|
Python
|
agpl-3.0
| 4,669
|
[
"VisIt"
] |
454f4e3c376713f3fcbc7de6fc4ff89efd2bf32f42a97366f632ab0589c9ff80
|
from numpy import array_equal
from numpy import exp
from numpy import dot
from neural_network.neurallayer import NeuralLayer
class NeuralNetwork:
"""
A basic multilayered neural network. Contains variable input nodes, inner layers, and outputs. Input and output are both
considered layers (i.e. "single layer" would be number_of_layers == 2).
inputs inner layers outputs
------ ------
------ | sw | ---- | sw | \
------ \/ ------ \
------ /\ ------ \ ------
------ | sw | ---- | sw | --->- | sw |
------ \/ ------ / ------
------ /\ ------ /
------ | sw | ---- | sw | /
------ ------
Parameters
----------
number_of_layers : int
The number of layers (including the input and output layers) that are in the neural network
number_of_inputs : int
The number of inputs in the data
number_of_outputs : int
The number of outputs the network will produce
number_of_nodes_in_hidden_layers : int
The number of nodes in the inner layers.
bias : float
The bias we want to apply
learning_rate : float
The learning rate
"""
def __init__(self, number_of_layers=3, number_of_inputs=3, number_of_outputs=1, number_of_nodes_in_hidden_layers=4,
bias=0, learning_rate=1):
# define neuron and assign random weights
layers = []
if number_of_layers <= 2:
layers.append(NeuralLayer(number_of_inputs, number_of_outputs))
else:
layers.append(NeuralLayer(number_of_inputs, number_of_nodes_in_hidden_layers))
for i in range(number_of_layers-2):
layers.append(NeuralLayer(number_of_nodes_in_hidden_layers, number_of_nodes_in_hidden_layers))
layers.append(NeuralLayer(number_of_nodes_in_hidden_layers, number_of_outputs))
self.layers = layers
self._input_layer = self.layers[0]
self._output_layer = self.layers[-1]
self._bias = bias
self._learning_rate = learning_rate
@staticmethod
def _sigmoid(weighted_sum_of_inputs):
"""
The Sigmoid function, which describes an S shaped curve. We pass the weighted sum of the inputs through this function
to normalise them between 0 and 1.
Parameters
----------
weighted_sum_of_inputs : numpy.ndarray
Weighted sum to be normalized
Returns
----------
: float
Normalized weight (0-1)
"""
return 1 / (1 + exp(-weighted_sum_of_inputs))
@staticmethod
def _sigmoid_derivative(normalized_weight_of_inputs):
"""
The derivative of the Sigmoid function. This is the gradient of the Sigmoid curve. It indicates how confident we are
about the existing weight.
Parameters
----------
normalized_weight_of_inputs : numpy.ndarray
Normalized weight (0-1)
Returns
----------
: numpy.ndarray
Confidence about existing weight
"""
return normalized_weight_of_inputs * (1.0 - normalized_weight_of_inputs)
def train(self, training_inputs, training_outputs, number_of_iterations):
"""
Train the neural network by trial and error, adjusting the weights on each iteration.
Parameters
----------
training_inputs : numpy.ndarray
The input used for training the neural network
training_outputs : numpy.ndarray
The training output for the neural network
number_of_iterations : int
Number of iterations we should execute for the training
"""
for iteration in range(number_of_iterations):
# Pass off the training set to our neuron
outputs = self.think(training_inputs, self._bias)
# Multiply the error by the input and again by the gradient of the Sigmoid curve.
# This means less confident weights are adjusted more.
# This means inputs, which are zero, do not cause changes to the weights.
errors, deltas = self._calculate_error_and_weight_change(outputs, training_outputs)
adjustments = self._calculate_weight_adjustments(deltas, outputs, training_inputs)
# Adjust the weights.
for i in range(len(self.layers)):
self.layers[i].synaptic_weights += adjustments[i]
@staticmethod
def _calculate_weight_adjustments(deltas, outputs, training_inputs):
"""
Calculate the adjustments that need to be made to the weights.
Parameters
----------
deltas : list
The change to the weights based on learning rate and error
outputs : list
Results of the passthrough
training_inputs : numpy.ndarray
The training data
Return
----------
adjustments : list
The adjustments to be made to the weights.
"""
adjustments = []
i = 0
for delta in reversed(deltas):
if array_equal(delta, deltas[-1]):
adjustments.append(training_inputs.T.dot(delta))
else:
adjustments.append(outputs[i].T.dot(delta))
i += 1
return adjustments
def _calculate_error_and_weight_change(self, outputs, training_outputs):
deltas = []
errors = []
i = len(self.layers) - 1
for out in reversed(outputs):
if out is outputs[-1]:
errors.append(training_outputs - outputs[-1])
else:
errors.append(errors[-1].dot(self.layers[i].synaptic_weights.T))
i -= 1
deltas.append(self._learning_rate * errors[-1] * self._sigmoid_derivative(out))
return errors, deltas
def think(self, inputs, bias=0):
"""
Pass inputs through our neural network (our single neuron).
Parameters
----------
inputs : numpy.ndarray
Input data
bias : float
The bias to apply
Return
----------
outputs : numpy.ndarray
The normalized weights
"""
input_data = inputs
outputs = []
for layer in self.layers:
input_data = self._sigmoid(dot(input_data, layer.synaptic_weights) + bias)
outputs.append(input_data)
return outputs
|
maxleblanc/making_a_neural_network
|
neural_network/neuralnet.py
|
Python
|
apache-2.0
| 6,736
|
[
"NEURON"
] |
58ef9f21efeecc6deaad8b404efc8f7d7bd07a0971b304893a0571a99e48d4c7
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for ops used in imperative programming."""
from __future__ import absolute_import
from ..ndarray import numpy as _mx_nd_np
__all__ = ["randint", "uniform", "normal"]
def randint(low, high=None, size=None, dtype=None, **kwargs):
"""Return random integers from `low` (inclusive) to `high` (exclusive).
Return random integers from the "discrete uniform" distribution of
the specified dtype in the "half-open" interval [`low`, `high`). If
`high` is None (the default), then results are from [0, `low`).
Parameters
----------
low : int
Lowest (signed) integer to be drawn from the distribution (unless
``high=None``, in which case this parameter is one above the
*highest* such integer).
high : int, optional
If provided, one above the largest (signed) integer to be drawn
from the distribution (see above for behavior if ``high=None``).
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
Desired dtype of the result. All dtypes are determined by their
name, i.e., 'int64', 'int', etc, so byteorder is not available
and a specific precision may have different C types depending
on the platform. The default value is 'np.int'.
ctx : Context, optional
Device context of output. Default is current context.
out : ndarray, optional
The output ndarray (default is `None`).
Returns
-------
out : ndarray of ints
`size`-shaped array of random integers from the appropriate
distribution, or a single such random int if `size` not provided.
Examples
--------
>>> np.random.randint(2, size=10)
array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
>>> np.random.randint(1, size=10)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Generate a 2 x 4 array of ints between 0 and 4, inclusive:
>>> np.random.randint(5, size=(2, 4))
array([[4, 0, 2, 1],
[3, 2, 2, 0]])
"""
return _mx_nd_np.random.randint(low, high, size, dtype, **kwargs)
def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float, ndarray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, ndarray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a scalar tensor containing a single value is returned if
``low`` and ``high`` are both scalars.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : ndarray
Drawn samples from the parameterized uniform distribution.
"""
return _mx_nd_np.random.uniform(low, high, size=size, ctx=ctx, dtype=dtype, out=out)
def normal(loc=0.0, scale=1.0, size=None, **kwargs):
"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float, optional
Mean (centre) of the distribution.
scale : float, optional
Standard deviation (spread or "width") of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., `(m, n, k)`, then `m * n * k`
samples are drawn. If size is `None` (default), a scalar tensor containing
a single value is returned if loc and scale are both scalars.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray
Drawn samples from the parameterized normal distribution.
Notes
-----
This function currently does not support ``loc`` and ``scale`` as ndarrays.
"""
return _mx_nd_np.random.normal(loc, scale, size, **kwargs)
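# Minimal illustrative examples (sample values are random, not reproducible):
#
#     standard = normal(loc=0.0, scale=1.0, size=(2, 3))  # 2x3 draws from N(0, 1)
#     shifted = normal(loc=2.0, scale=0.5, size=(2, 3))   # 2x3 draws with mean 2.0, std 0.5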
def multinomial(n, pvals, size=None, **kwargs):
"""multinomial(n, pvals, size=None)
Draw samples from a multinomial distribution.
The multinomial distribution is a multivariate generalisation of the binomial distribution.
Take an experiment with one of ``p`` possible outcomes. An example of such an experiment is throwing a dice,
where the outcome can be 1 through 6. Each sample drawn from the distribution represents n such experiments.
Its values, ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the outcome was ``i``.
Parameters
----------
n : int
Number of experiments.
pvals : sequence of floats, length p
Probabilities of each of the p different outcomes. These should sum to 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples
are drawn. Default is None, in which case a single value is returned.
Returns
-------
out : ndarray
The drawn samples, of shape size, if that was provided. If not, the shape is ``(N,)``.
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution.
Examples
--------
Throw a dice 1000 times, and 1000 times again:
>>> np.random.multinomial(1000, [1/6.]*6, size=2)
array([[164, 161, 179, 158, 150, 188],
[178, 162, 177, 143, 163, 177]])
A loaded die is more likely to land on number 6:
>>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
array([19, 14, 12, 11, 21, 23])
>>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3])
array([32, 68])
"""
return _mx_nd_np.random.multinomial(n, pvals, size, **kwargs)
|
reminisce/mxnet
|
python/mxnet/numpy/random.py
|
Python
|
apache-2.0
| 7,462
|
[
"Gaussian"
] |
73d2d9dad71c29ac0097b1b235609555fadbb2a57b655fb6b16b5ac9e1d77c1b
|
# This module provides classes that represent VRML objects for use
# in data visualization applications.
#
# Written by: Konrad Hinsen <[email protected]>
# Last revision: 2001-1-3
#
"""This module provides definitions of simple 3D graphics objects and
VRML scenes containing them. The objects are appropriate for data
visualization, not for virtual reality modelling. Scenes can be written
to VRML files or visualized immediately using a VRML browser, whose
name is taken from the environment variable VRMLVIEWER (under Unix).
There are a few attributes that are common to all graphics objects:
material -- a Material object defining color and surface properties
comment -- a comment string that will be written to the VRML file
reuse -- a boolean flag (defaulting to false). If set to one,
the object may share its VRML definition with other
objects. This reduces the size of the VRML file, but
can yield surprising side effects in some cases.
This module uses the original VRML definition, version 1.0. For the
newer VRML 2 or VRML97, use the module VRML2, which uses exactly the
same interface. There is another almost perfectly compatible module
VMD, which produces input files for the molecular visualization program
VMD.
Example:
>>> from Scientific.Visualization.VRML import *
>>> scene = Scene([])
>>> scale = ColorScale(10.)
>>> for x in range(11):
...     color = scale(x)
...     scene.addObject(Cube(Vector(x, 0., 0.), 0.2,
...                          material=Material(diffuse_color = color)))
>>> scene.view()
"""
from Scientific.IO.TextFile import TextFile
from Scientific.Geometry import Transformation, Vector, VectorModule
import Numeric
import os, string, tempfile
from Color import *
#
# VRML file
#
class SceneFile:
def __init__(self, filename, mode = 'r'):
if mode == 'r':
raise TypeError, 'Not yet implemented.'
self.file = TextFile(filename, 'w')
self.file.write('#VRML V1.0 ascii\n')
self.file.write('Separator {\n')
self.memo = {}
self.name_counter = 0
def __del__(self):
self.close()
def writeString(self, data):
self.file.write(data)
def close(self):
if self.file is not None:
self.file.write('}\n')
self.file.close()
self.file = None
def write(self, object):
object.writeToFile(self)
def uniqueName(self):
self.name_counter = self.name_counter + 1
return 'i' + `self.name_counter`
VRMLFile = SceneFile
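# A minimal sketch of the low-level SceneFile/VRMLFile protocol used by
# Scene.writeToFile below (comments only; in practice the Scene class is used
# instead of driving SceneFile directly):
#
#     f = VRMLFile('demo.wrl', 'w')
#     f.write(Sphere(Vector(0., 0., 0.), 1.))   # any ShapeObject defined below
#     f.close()                                 # emits the closing brace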
#
# Scene
#
class Scene:
"""VRML scene
A VRML scene is a collection of graphics objects that can be
written to a VRML file or fed directly to a VRML browser.
Constructor: Scene(|objects|=None, |cameras|=None, **|options|)
Arguments:
|objects| -- a list of graphics objects or 'None' for an empty scene
|cameras| -- a list of cameras (not yet implemented)
|options| -- options as keyword arguments (none defined at the moment;
this argument is provided for compatibility with
other modules)
"""
def __init__(self, objects = None, cameras = None, **options):
if objects is None:
self.objects = []
elif type(objects) == type([]):
self.objects = objects
else:
self.objects = [objects]
if cameras is None:
self.cameras = []
else:
self.cameras = cameras
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
def addObject(self, object):
"Adds |object| to the list of graphics objects."
self.objects.append(object)
def addCamera(self, camera):
"Adds |camers| to the list of cameras."
self.cameras.append(camera)
def writeToFile(self, filename):
"Writes the scene to a VRML file with name |filename|."
file = VRMLFile(filename, 'w')
if self.cameras:
self.cameras[0].writeToFile(file)
for o in self.objects:
o.writeToFile(file)
file.close()
def view(self):
"Start a VRML browser for the scene."
import sys
filename = tempfile.mktemp()+'.wrl'
if sys.platform == 'win32':
import win32api
self.writeToFile(filename)
win32api.ShellExecute(0, "open", filename, None, "", 1)
elif os.environ.has_key('VRMLVIEWER'):
self.writeToFile(filename)
if os.fork() == 0:
os.system(os.environ['VRMLVIEWER'] + ' ' + filename +
' 1> /dev/null 2>&1')
os.unlink(filename)
os._exit(0)
else:
print 'No VRML viewer defined'
#
# Base class for everything that produces nodes
#
class VRMLObject:
def __init__(self, attr):
self.attr = {}
for key, value in attr.items():
if key in self.attribute_names:
self.attr[key] = value
else:
raise AttributeError, 'illegal attribute: ' + str(key)
attribute_names = ['comment']
def __getitem__(self, attr):
try:
return self.attr[attr]
except KeyError:
return None
def __setitem__(self, attr, value):
self.attr[attr] = value
def __copy__(self):
return copy.deepcopy(self)
def writeToFile(self, file):
raise AttributeError, 'Class ' + self.__class__.__name__ + \
' does not implement file output.'
#
# Shapes
#
class ShapeObject(VRMLObject):
def __init__(self, attr, rotation, translation, reference_point):
VRMLObject.__init__(self, attr)
if rotation is None:
rotation = Transformation.Rotation(VectorModule.ez, 0.)
else:
rotation = apply(Transformation.Rotation, rotation)
if translation is None:
translation = Transformation.Translation(Vector(0.,0.,0.))
else:
translation = Transformation.Translation(translation)
self.transformation = translation*rotation
self.reference_point = reference_point
attribute_names = VRMLObject.attribute_names + ['material', 'reuse']
def __add__(self, other):
return Group([self]) + Group([other])
def writeToFile(self, file):
comment = self['comment']
if comment is not None:
file.writeString('# ' + comment + '\n')
file.writeString('TransformSeparator {\n')
vector = self.transformation.translation().displacement()
axis, angle = self.transformation.rotation().axisAndAngle()
trans_flag = vector.length() > 1.e-4
rot_flag = abs(angle) > 1.e-4
if trans_flag and rot_flag:
file.writeString('Transform{translation ' + `vector[0]` + ' ' + \
`vector[1]` + ' ' + `vector[2]` + \
' rotation ' + `axis[0]` + ' ' + `axis[1]` +
' ' + `axis[2]` + ' ' + `angle` + '}\n')
elif trans_flag:
file.writeString('Translation{translation ' + `vector[0]` + ' ' + \
`vector[1]` + ' ' + `vector[2]` + '}\n')
elif rot_flag:
file.writeString('Rotation{rotation ' + `axis[0]` + ' ' + \
`axis[1]` + ' ' + `axis[2]` + ' ' + \
`angle` + '}\n')
material = self['material']
reuse = self['reuse']
if reuse:
key = self.memoKey() + (material, self.__class__)
if file.memo.has_key(key):
file.writeString('USE ' + file.memo[key] + '\n')
self.use(file)
if material is not None:
material.use(file)
else:
name = file.uniqueName()
file.memo[key] = name
file.writeString('DEF ' + name + ' Group{\n')
if material is not None:
material.writeToFile(file)
self.writeSpecification(file)
file.writeString('}\n')
else:
if material is not None:
material.writeToFile(file)
self.writeSpecification(file)
file.writeString('}\n')
def use(self, file):
pass
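# A brief sketch of the 'reuse' mechanism handled by writeToFile above (comments
# only). With reuse set, the memo key combines memoKey(), the material and the
# class: the first matching shape is written as 'DEF <name> Group{...}' and later
# identical shapes emit only 'USE <name>', keeping the VRML file small.
#
#     m = DiffuseMaterial('brown')                                 # defined later in this module
#     s1 = Sphere(Vector(0., 0., 0.), 0.05, material=m, reuse=1)   # written with DEF
#     s2 = Sphere(Vector(1., 0., 0.), 0.05, material=m, reuse=1)   # written with USE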
class Sphere(ShapeObject):
"""Sphere
Constructor: Sphere(|center|, |radius|, **|attributes|)
Arguments:
|center| -- the center of the sphere (a vector)
|radius| -- the sphere radius (a positive number)
|attributes| -- any graphics object attribute
"""
def __init__(self, center, radius, **attr):
self.radius = radius
ShapeObject.__init__(self, attr, None, center, center)
def writeSpecification(self, file):
file.writeString('Sphere{radius ' + `self.radius` + '}\n')
def memoKey(self):
return (self.radius, )
class Cube(ShapeObject):
"""Cube
Constructor: Cube(|center|, |edge|, **|attributes|)
Arguments:
|center| -- the center of the cube (a vector)
|edge| -- the length of an edge (a positive number)
|attributes| -- any graphics object attribute
The edges of a cube are always parallel to the coordinate axes.
"""
def __init__(self, center, edge, **attr):
self.edge = edge
ShapeObject.__init__(self, attr, None, center, center)
def writeSpecification(self, file):
file.writeString('Cube{width ' + `self.edge` + \
' height ' + `self.edge` + \
' depth ' + `self.edge` + '}\n')
def memoKey(self):
return (self.edge, )
class LinearOrientedObject(ShapeObject):
def __init__(self, attr, point1, point2):
center = 0.5*(point1+point2)
axis = point2-point1
self.height = axis.length()
if self.height > 0:
axis = axis/self.height
rot_axis = VectorModule.ey.cross(axis)
sine = rot_axis.length()
cosine = VectorModule.ey*axis
angle = Transformation.angleFromSineAndCosine(sine, cosine)
if abs(angle) < 1.e-4 or abs(angle-2.*Numeric.pi) < 1.e-4:
rotation = None
else:
if abs(sine) < 1.e-4:
rot_axis = VectorModule.ex
rotation = (rot_axis, angle)
else:
rotation = None
ShapeObject.__init__(self, attr, rotation, center, center)
class Cylinder(LinearOrientedObject):
"""Cylinder
Constructor: Cylinder(|point1|, |point2|, |radius|,
|faces|='(1, 1, 1)', **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the cylinder axis (vectors)
|radius| -- the radius (a positive number)
|attributes| -- any graphics object attribute
|faces| -- a sequence of three boolean flags, corresponding to
the cylinder hull and the two circular end pieces,
specifying for each of these parts whether it is visible
or not.
"""
def __init__(self, point1, point2, radius, faces = (1, 1, 1), **attr):
self.faces = faces
self.radius = radius
LinearOrientedObject.__init__(self, attr, point1, point2)
def writeSpecification(self, file):
file.writeString('Cylinder{parts ')
if self.faces == (1,1,1):
file.writeString('ALL')
else:
plist=[]
if self.faces[0]: plist.append('SIDES')
if self.faces[1]: plist.append('BOTTOM')
if self.faces[2]: plist.append('TOP')
if plist: file.writeString( '(' + string.join(plist,'|') + ')' )
file.writeString(' radius ' + `self.radius` + \
' height ' + `self.height` + '}\n')
def memoKey(self):
return (self.radius, self.height, self.faces)
class Cone(LinearOrientedObject):
"""Cone
Constructor: Cone(|point1|, |point2|, |radius|, |face|='1', **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the cylinder axis (vectors).
|point1| is the tip of the cone.
|radius| -- the radius (a positive number)
|attributes| -- any graphics object attribute
|face| -- a boolean flag, specifying if the circular bottom is visible
"""
def __init__(self, point1, point2, radius, face = 1, **attr):
self.face = face
self.radius = radius
LinearOrientedObject.__init__(self, attr, point2, point1)
def writeSpecification(self, file):
file.writeString('Cone{parts ')
if self.face:
file.writeString('ALL')
else:
file.writeString('SIDES')
file.writeString(' bottomRadius ' + `self.radius` + \
' height ' + `self.height` + '}\n')
def memoKey(self):
return (self.radius, self.height, self.face)
class Line(ShapeObject):
"""Line
Constructor: Line(|point1|, |point2|, **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the line (vectors)
|attributes| -- any graphics object attribute
"""
def __init__(self, point1, point2, **attr):
self.points = (point1, point2)
center = 0.5*(point1+point2)
ShapeObject.__init__(self, attr, None, None, center)
def writeSpecification(self, file):
file.writeString('Coordinate3{point [' + \
`self.points[0][0]` + ' ' + `self.points[0][1]` + \
' ' + `self.points[0][2]` + ',' + \
`self.points[1][0]` + ' ' + `self.points[1][1]` + \
' ' + `self.points[1][2]` + \
']}IndexedLineSet{coordIndex[0,1,-1]}\n')
def memoKey(self):
return tuple(self.points[0]) + tuple(self.points[1])
class PolyLines(ShapeObject):
"""Multiple connected lines
Constructor: PolyLines(|points|, **|attributes|)
Arguments:
|points| -- a sequence of points to be connected by lines
|attributes| -- any graphics object attribute
"""
def __init__(self, points, **attr):
self.points = points
ShapeObject.__init__(self, attr, None, None, Vector(0., 0., 0.))
def writeSpecification(self, file):
s = 'Coordinate3{point ['
for p in self.points:
s = s + `p[0]` + ' ' + `p[1]` + ' ' + `p[2]` + ','
file.writeString(s[:-1] + ']}IndexedLineSet{coordIndex')
file.writeString(`range(len(self.points))+[-1]` + '}\n')
def memoKey(self):
return tuple(map(tuple, self.points))
class Polygons(ShapeObject):
"""Polygons
Constructor: Polygons(|points|, |index_lists|, **|attributes|)
Arguments:
|points| -- a sequence of points
|index_lists| -- a sequence of index lists, one for each polygon.
The index list for a polygon defines which points
in |points| are vertices of the polygon.
|attributes| -- any graphics object attribute
"""
def __init__(self, points, index_lists, **attr):
self.points = points
self.index_lists = index_lists
ShapeObject.__init__(self, attr, None, None, Vector(0.,0.,0.))
def writeSpecification(self, file):
file.writeString('Coordinate3{point [')
for v in self.points[:-1]:
file.writeString(`v[0]` + ' ' + `v[1]` + ' ' + `v[2]` + ',')
v = self.points[-1]
file.writeString(`v[0]` + ' ' + `v[1]` + ' ' + `v[2]` + \
']}IndexedFaceSet{coordIndex[')
for polygon in self.index_lists:
for index in polygon:
file.writeString(`index`+',')
file.writeString('-1,')
file.writeString(']}\n')
def memoKey(self):
return (tuple(map(tuple, self.points)),
tuple(map(tuple, self.index_lists)))
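# A minimal sketch for Polygons (comments only). Each index list refers to
# positions in the shared points sequence, and repeating the first index closes
# the polygon, as in the test code at the bottom of this file:
#
#     points = [Vector(0., 0., 0.), Vector(0., 1., 0.),
#               Vector(1., 1., 0.), Vector(1., 0., 0.)]
#     square = Polygons(points, [[0, 1, 2, 3, 0]],
#                       material=DiffuseMaterial('blue'))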
#
# Groups
#
class Group:
def __init__(self, objects, **attr):
self.objects = []
for o in objects:
if isGroup(o):
self.objects = self.objects + o.objects
else:
self.objects.append(o)
for key, value in attr.items():
for o in self.objects:
o[key] = value
is_group = 1
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
def __coerce__(self, other):
if not isGroup(other):
other = Group([other])
return (self, other)
def __add__(self, other):
return Group(self.objects + other.objects)
def writeToFile(self, file):
for o in self.objects:
o.writeToFile(file)
def isGroup(x):
return hasattr(x, 'is_group')
#
# Composite Objects
#
class Arrow(Group):
"""Arrow
An arrow consists of a cylinder and a cone.
Constructor: Arrow(|point1|, |point2|, |radius|, **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the arrow (vectors).
|point2| defines the tip of the arrow.
|radius| -- the radius of the arrow shaft (a positive number)
|attributes| -- any graphics object attribute
"""
def __init__(self, point1, point2, radius, **attr):
axis = point2-point1
height = axis.length()
axis = axis/height
cone_height = min(height, 4.*radius)
cylinder_height = height - cone_height
junction = point2-axis*cone_height
cone = apply(Cone, (point2, junction, 0.75*cone_height), attr)
objects = [cone]
if cylinder_height > 0.005*radius:
cylinder = apply(Cylinder, (point1, junction, radius), attr)
objects.append(cylinder)
Group.__init__(self, objects)
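# A minimal sketch for Arrow (comments only): a Group made of a Cylinder shaft
# plus a Cone tip pointing at point2. The test code at the end of this file
# draws the three coordinate axes exactly this way:
#
#     x_axis = Arrow(VectorModule.null, VectorModule.ex, 0.01,
#                    material=DiffuseMaterial('orange'))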
#
# Materials
#
class Material(VRMLObject):
"""Material for graphics objects
A material defines the color and surface properties of an object.
Constructor: Material(**|attributes|)
The attributes are "ambient_color", "diffuse_color", "specular_color",
"emissive_color", "shininess", and "transparency".
"""
def __init__(self, **attr):
VRMLObject.__init__(self, attr)
attribute_names = VRMLObject.attribute_names + \
['ambient_color', 'diffuse_color', 'specular_color',
'emissive_color', 'shininess', 'transparency']
attribute_conversion = {'ambient_color': 'ambientColor',
'diffuse_color': 'diffuseColor',
'specular_color': 'specularColor',
'emissive_color': 'emissiveColor',
'shininess': 'shininess',
'transparency': 'transparency'}
def writeToFile(self, file):
try:
last = file.memo['material']
if last == self: return
except KeyError: pass
if file.memo.has_key(self):
file.writeString('USE ' + file.memo[self] + '\n')
else:
name = file.uniqueName()
file.memo[self] = name
file.writeString('DEF ' + name + ' Material{\n')
for key, value in self.attr.items():
file.writeString(self.attribute_conversion[key] + ' ' + \
str(value) + '\n')
file.writeString('}\n')
file.memo['material'] = self
def use(self, file):
file.memo['material'] = self
#
# Predefined materials
#
def DiffuseMaterial(color):
"Returns a material with the 'diffuse color' attribute set to |color|."
if type(color) is type(''):
color = ColorByName(color)
try:
return diffuse_material_dict[color]
except KeyError:
m = Material(diffuse_color = color)
diffuse_material_dict[color] = m
return m
diffuse_material_dict = {}
def EmissiveMaterial(color):
"Returns a material with the 'emissive color' attribute set to |color|."
if type(color) is type(''):
color = ColorByName(color)
try:
return emissive_material_dict[color]
except KeyError:
m = Material(emissive_color = color)
emissive_material_dict[color] = m
return m
emissive_material_dict = {}
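# A short sketch of the material caches above (comments only): DiffuseMaterial
# and EmissiveMaterial keep one Material per color, so repeated calls with the
# same color return the same shared instance.
#
#     red = DiffuseMaterial('red')
#     assert red is DiffuseMaterial('red')    # cached, identical object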
#
# Test code
#
if __name__ == '__main__':
if 1:
spheres = DiffuseMaterial('brown')
links = DiffuseMaterial('orange')
s1 = Sphere(VectorModule.null, 0.05, material = spheres, reuse = 1)
s2 = Sphere(VectorModule.ex, 0.05, material = spheres, reuse = 1)
s3 = Sphere(VectorModule.ey, 0.05, material = spheres, reuse = 1)
s4 = Sphere(VectorModule.ez, 0.05, material = spheres, reuse = 1)
a1 = Arrow(VectorModule.null, VectorModule.ex, 0.01, material = links)
a2 = Arrow(VectorModule.null, VectorModule.ey, 0.01, material = links)
a3 = Arrow(VectorModule.null, VectorModule.ez, 0.01, material = links)
scene = Scene([s1, s2, s3, s4, a1, a2, a3])
scene.view()
if 0:
scene = Scene([])
scale = ColorScale(10.)
for x in range(11):
color = scale(x)
m = Material(diffuse_color = color)
scene.addObject(Cube(Vector(x,0.,0.), 0.2, material=m))
scene.view()
if 0:
points = [Vector(0., 0., 0.),
Vector(0., 1., 0.),
Vector(1., 1., 0.),
Vector(1., 0., 0.),
Vector(1., 0., 1.),
Vector(1., 1., 1.)]
indices = [[0, 1, 2, 3, 0], [3, 4, 5, 2, 3]]
scene = Scene(Polygons(points, indices,
material=DiffuseMaterial('blue')))
scene.view()
if 0:
points = [Vector(0., 0., 0.),
Vector(0., 1., 0.),
Vector(1., 1., 0.),
Vector(1., 0., 0.),
Vector(1., 0., 1.),
Vector(1., 1., 1.)]
scene = Scene(PolyLines(points, material = DiffuseMaterial('black')))
scene.view()
|
fxia22/ASM_xf
|
PythonD/site_python/Scientific/Visualization/VRML.py
|
Python
|
gpl-2.0
| 19,509
|
[
"VMD"
] |
6d28056fef17761bb62fff3bac383a45043cf0c385f4f1089eb6682f6a3ab4da
|
"""
Regression tests for the Test Client, especially the customized assertions.
"""
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from django.core.exceptions import SuspiciousOperation
import os
import sha
class AssertContainsTests(TestCase):
def test_contains(self):
"Responses can be inspected for content, including counting repeated substrings"
response = self.client.get('/test_client_regress/no_template_view/')
self.assertNotContains(response, 'never')
self.assertContains(response, 'never', 0)
self.assertContains(response, 'once')
self.assertContains(response, 'once', 1)
self.assertContains(response, 'twice')
self.assertContains(response, 'twice', 2)
try:
self.assertNotContains(response, 'once')
except AssertionError, e:
self.assertEquals(str(e), "Response should not contain 'once'")
try:
self.assertContains(response, 'never', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'never' in response (expected 1)")
try:
self.assertContains(response, 'once', 0)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 0)")
try:
self.assertContains(response, 'once', 2)
except AssertionError, e:
self.assertEquals(str(e), "Found 1 instances of 'once' in response (expected 2)")
try:
self.assertContains(response, 'twice', 1)
except AssertionError, e:
self.assertEquals(str(e), "Found 2 instances of 'twice' in response (expected 1)")
try:
self.assertContains(response, 'thrice')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't find 'thrice' in response")
try:
self.assertContains(response, 'thrice', 3)
except AssertionError, e:
self.assertEquals(str(e), "Found 0 instances of 'thrice' in response (expected 3)")
class AssertTemplateUsedTests(TestCase):
fixtures = ['testdata.json']
def test_no_context(self):
"Template usage assertions work then templates aren't in use"
response = self.client.get('/test_client_regress/no_template_view/')
# Check that the no template case doesn't mess with the template assertions
self.assertTemplateNotUsed(response, 'GET Template')
try:
self.assertTemplateUsed(response, 'GET Template')
except AssertionError, e:
self.assertEquals(str(e), "No templates used to render the response")
def test_single_context(self):
"Template assertions work when there is a single context"
response = self.client.get('/test_client/post_view/', {})
#
try:
self.assertTemplateNotUsed(response, 'Empty GET Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty GET Template' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, 'Empty POST Template')
except AssertionError, e:
self.assertEquals(str(e), "Template 'Empty POST Template' was not a template used to render the response. Actual template(s) used: Empty GET Template")
def test_multiple_context(self):
"Template assertions work when there are multiple contexts"
post_data = {
'text': 'Hello World',
'email': '[email protected]',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
try:
self.assertTemplateNotUsed(response, "form_view.html")
except AssertionError, e:
self.assertEquals(str(e), "Template 'form_view.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateNotUsed(response, 'base.html')
except AssertionError, e:
self.assertEquals(str(e), "Template 'base.html' was used unexpectedly in rendering the response")
try:
self.assertTemplateUsed(response, "Valid POST Template")
except AssertionError, e:
self.assertEquals(str(e), "Template 'Valid POST Template' was not a template used to render the response. Actual template(s) used: form_view.html, base.html")
class AssertRedirectsTests(TestCase):
def test_redirect_page(self):
"An assertion is raised if the original page couldn't be retrieved as expected"
# This page will redirect with code 301, not 302
response = self.client.get('/test_client/permanent_redirect_view/')
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_lost_query(self):
"An assertion is raised if the redirect location doesn't preserve GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
try:
self.assertRedirects(response, '/test_client/get_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response redirected to 'http://testserver/test_client/get_view/?var=value', expected 'http://testserver/test_client/get_view/'")
def test_incorrect_target(self):
"An assertion is raised if the response redirects to another target"
response = self.client.get('/test_client/permanent_redirect_view/')
try:
# Should redirect to get_view
self.assertRedirects(response, '/test_client/some_view/')
except AssertionError, e:
self.assertEquals(str(e), "Response didn't redirect as expected: Response code was 301 (expected 302)")
def test_target_page(self):
"An assertion is raised if the response redirect target cannot be retrieved as expected"
response = self.client.get('/test_client/double_redirect_view/')
try:
# The redirect target responds with a 301 code, not 200
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/')
except AssertionError, e:
self.assertEquals(str(e), "Couldn't retrieve redirection page '/test_client/permanent_redirect_view/': response code was 301 (expected 200)")
class AssertFormErrorTests(TestCase):
def test_unknown_form(self):
"An assertion is raised if the form name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'wrong_form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'wrong_form' was not used to render the response")
def test_unknown_field(self):
"An assertion is raised if the field name is unknown"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'some_field', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the field 'some_field'")
def test_noerror_field(self):
"An assertion is raised if the field doesn't have any errors"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'value', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'value' on form 'form' in context 0 contains no errors")
def test_unknown_error(self):
"An assertion is raised if the field doesn't contain the provided error"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', 'email', 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The field 'email' on form 'form' in context 0 does not contain the error 'Some error.' (actual errors: [u'Enter a valid e-mail address.'])")
def test_unknown_nonfield_error(self):
"""
Checks that an assertion is raised if the form's non field errors
doesn't contain the provided error.
"""
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
try:
self.assertFormError(response, 'form', None, 'Some error.')
except AssertionError, e:
self.assertEqual(str(e), "The form 'form' in context 0 does not contain the non-field error 'Some error.' (actual errors: )")
class LoginTests(TestCase):
fixtures = ['testdata']
def test_login_different_client(self):
"Check that using a different test client doesn't violate authentication"
# Create a second client, and log in.
c = Client()
login = c.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Get a redirection page with the second client.
response = c.get("/test_client_regress/login_protected_redirect_view/")
# At this point, self.client isn't logged in.
# Check that assertRedirects uses the original client, not the
# default client.
self.assertRedirects(response, "http://testserver/test_client_regress/get_view/")
class URLEscapingTests(TestCase):
def test_simple_argument_get(self):
"Get a view that has a simple string argument"
response = self.client.get(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_get(self):
"Get a view that has a string argument that requires escaping"
response = self.client.get(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
def test_simple_argument_post(self):
"Post for a view that has a simple string argument"
response = self.client.post(reverse('arg_view', args=['Slartibartfast']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Howdy, Slartibartfast')
def test_argument_with_space_post(self):
"Post for a view that has a string argument that requires escaping"
response = self.client.post(reverse('arg_view', args=['Arthur Dent']))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Hi, Arthur')
class ExceptionTests(TestCase):
fixtures = ['testdata.json']
def test_exception_cleared(self):
"#5836 - A stale user exception isn't re-raised by the test client."
login = self.client.login(username='testclient',password='password')
self.failUnless(login, 'Could not log in')
try:
response = self.client.get("/test_client_regress/staff_only/")
self.fail("General users should not be able to visit this page")
except SuspiciousOperation:
pass
# At this point, an exception has been raised, and should be cleared.
# This next operation should be successful; if it isn't we have a problem.
login = self.client.login(username='staff', password='password')
self.failUnless(login, 'Could not log in')
try:
self.client.get("/test_client_regress/staff_only/")
except SuspiciousOperation:
self.fail("Staff should be able to visit this page")
# We need two different tests to check URLconf substitution - one to check
# it was changed, and another one (without self.urls) to check it was reverted on
# teardown. This pair of tests relies upon the alphabetical ordering of test execution.
class UrlconfSubstitutionTests(TestCase):
urls = 'regressiontests.test_client_regress.urls'
def test_urlconf_was_changed(self):
"TestCase can enforce a custom URLConf on a per-test basis"
url = reverse('arg_view', args=['somename'])
self.assertEquals(url, '/arg_view/somename/')
# This test needs to run *after* UrlconfSubstitutionTests; the zz prefix in the
# name is to ensure alphabetical ordering.
class zzUrlconfSubstitutionTests(TestCase):
def test_urlconf_was_reverted(self):
"URLconf is reverted to original value after modification in a TestCase"
url = reverse('arg_view', args=['somename'])
self.assertEquals(url, '/test_client_regress/arg_view/somename/')
|
diofeher/django-nfa
|
tests/regressiontests/test_client_regress/models.py
|
Python
|
bsd-3-clause
| 14,482
|
[
"VisIt"
] |
59061d192405ee81a55343ed76fd6c4cc8c79f819f70246674c4e9c949ff040e
|
#
# @file TestSpeciesConcentrationRule.py
# @brief SpeciesConcentrationRule unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSpeciesConcentrationRule.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSpeciesConcentrationRule(unittest.TestCase):
global SCR
SCR = None
def setUp(self):
self.SCR = libsbml.AssignmentRule(1,2)
self.SCR.setL1TypeCode(libsbml.SBML_SPECIES_CONCENTRATION_RULE)
if (self.SCR == None):
pass
pass
def tearDown(self):
_dummyList = [ self.SCR ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesConcentrationRule_create(self):
self.assert_( self.SCR.getTypeCode() == libsbml.SBML_ASSIGNMENT_RULE )
self.assert_( self.SCR.getL1TypeCode() == libsbml.SBML_SPECIES_CONCENTRATION_RULE )
self.assert_( self.SCR.getNotes() == None )
self.assert_( self.SCR.getAnnotation() == None )
self.assert_( self.SCR.getFormula() == "" )
self.assert_( self.SCR.getType() == libsbml.RULE_TYPE_SCALAR )
self.assert_( self.SCR.getVariable() == "" )
self.assertEqual( False, self.SCR.isSetVariable() )
pass
def test_SpeciesConcentrationRule_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_SpeciesConcentrationRule_setSpecies(self):
species = "s2";
self.SCR.setVariable(species)
self.assert_(( species == self.SCR.getVariable() ))
self.assertEqual( True, self.SCR.isSetVariable() )
if (self.SCR.getVariable() == species):
pass
s = self.SCR.getVariable()
self.SCR.setVariable(s)
self.assert_(( species == self.SCR.getVariable() ))
self.SCR.setVariable("")
self.assertEqual( False, self.SCR.isSetVariable() )
if (self.SCR.getVariable() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSpeciesConcentrationRule))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestSpeciesConcentrationRule.py
|
Python
|
gpl-3.0
| 3,332
|
[
"VisIt"
] |
d0d452ba52a92efe5edf84c86a99ed7b14a6ba0ad4fadcb7616d779f645b16cd
|
""" :mod: GFAL2_XROOTStorage
=================
.. module: python
:synopsis: XROOT module based on the GFAL2_StorageBase class.
"""
# from DIRAC
from DIRAC import gLogger
from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase
class GFAL2_XROOTStorage( GFAL2_StorageBase ):
""" .. class:: GFAL2_XROOTStorage
Xroot interface to StorageElement using gfal2
"""
def __init__( self, storageName, parameters ):
""" c'tor
:param self: self reference
:param str storageName: SE name
:param str protocol: protocol to use
:param str rootdir: base path for vo files
:param str host: SE host
:param int port: port to use to communicate with :host:
:param str spaceToken: space token
:param str wspath: location of SRM on :host:
"""
self.log = gLogger.getSubLogger( "GFAL2_XROOTStorage", True )
# # init base class
GFAL2_StorageBase.__init__( self, storageName, parameters )
# self.log.setLevel( "DEBUG" )
self.pluginName = 'GFAL2_XROOT'
self.protocolParameters['Port'] = 0
self.protocolParameters['WSUrl'] = 0
self.protocolParameters['SpaceToken'] = 0
def _getExtendedAttributes( self, path ):
""" Hard coding list of attributes and then call the base method of GFAL2_StorageBase
:param self: self reference
:param str path: path of which we want extended attributes
:return: S_OK( attributeDict ) if successful, where the keys of the dict are the attributes and the values are the corresponding attribute values
"""
# hard coding the attributes list for xroot because the plugin returns the wrong values
# xrootd.* instead of xroot.* see: https://its.cern.ch/jira/browse/DMC-664
attributes = ['xroot.cksum', 'xroot.space']
res = GFAL2_StorageBase._getExtendedAttributes( self, path, attributes )
return res
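# A hedged usage sketch (comments only). In DIRAC this plugin is normally
# reached indirectly through the StorageElement abstraction rather than being
# constructed by hand; the SE name below is purely illustrative and exact
# method names may differ between DIRAC releases.
#
#     from DIRAC.Resources.Storage.StorageElement import StorageElement
#     se = StorageElement('SOME-XROOT-SE')      # hypothetical SE name from the CS
#     res = se.exists('/some/vo/user/file')     # returns a DIRAC S_OK/S_ERROR dict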
|
marcelovilaca/DIRAC
|
Resources/Storage/GFAL2_XROOTStorage.py
|
Python
|
gpl-3.0
| 1,841
|
[
"DIRAC"
] |
516bc1bac45135e7e33349bffb5f5d8e26e32a4b7083c0641328154d6eeac22b
|
# -----------------------------------------------------------------------------
# Milky Way - Turn based strategy game from Milky Way galaxy
#
# URL: https://github.com/FedericoRessi/milkyway/
# License: GPL3
# -----------------------------------------------------------------------------
'''
@author: Federico Ressi
'''
import logging
from PySide.QtGui import QWidget, QHBoxLayout, QStackedLayout, QVBoxLayout,\
QPushButton, QLabel
from milkyway import LEMMA
from milkyway.pyside.new_game import NewGamePanel
from milkyway.ui.main_window import MainWindowView
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class MainWindow(MainWindowView):
'''
Main window view
'''
widget = None
_layout = None
_main_menu = None
_main_menu_buttons = None
def _initialize_view(self):
presenter = self._presenter
self.widget = window = QWidget()
window.setWindowTitle(LEMMA)
self._layout = window_layout = QStackedLayout()
window.setLayout(window_layout)
self._main_menu = main_menu = QWidget(window)
window_layout.addWidget(main_menu)
main_menu_v_layout = QHBoxLayout()
main_menu.setLayout(main_menu_v_layout)
main_menu_v_layout.addStretch(20)
main_menu_v_layout.addWidget(QLabel(LEMMA))
main_menu_layout = QVBoxLayout()
main_menu_v_layout.addLayout(main_menu_layout)
main_menu_v_layout.addStretch(20)
main_menu_layout.addStretch()
self._main_menu_buttons = buttons = {}
buttons[MainWindowView.CONTINUE_GAME] = continue_game = QPushButton(
'Continue game')
main_menu_layout.addWidget(continue_game)
continue_game.clicked.connect(presenter.continue_game_clicked)
buttons[MainWindowView.NEW_GAME] = new_game = QPushButton('New game')
main_menu_layout.addWidget(new_game)
new_game.clicked.connect(presenter.new_game_clicked)
buttons[MainWindowView.LOAD_GAME] = load_game = QPushButton(
'Load game')
main_menu_layout.addWidget(load_game)
load_game.clicked.connect(presenter.load_game_clicked)
buttons[MainWindowView.SAVE_GAME] = save_game = QPushButton(
'Save Game')
main_menu_layout.addWidget(save_game)
save_game.clicked.connect(presenter.save_game_clicked)
buttons[MainWindowView.QUIT] = quit_button = QPushButton('Quit')
main_menu_layout.addWidget(quit_button)
quit_button.clicked.connect(presenter.quit_clicked)
main_menu_layout.addStretch()
def _dispose_view(self):
self.widget.close()
del self.widget
del self._layout
del self._main_menu
del self._main_menu_buttons
def show(self):
'''
Show the window
'''
self.widget.show()
def show_main_menu(self, enabled_options):
for option, button in self._main_menu_buttons.iteritems():
button.setEnabled(option in enabled_options)
self._layout.setCurrentWidget(self._main_menu)
def show_new_game(self):
panel = NewGamePanel(parent=self)
self._layout.addWidget(panel.widget)
presenter = self._presenter
panel.cancel.clicked.connect(presenter.cancel_clicked)
panel.accept.clicked.connect(presenter.accept_clicked)
self._layout.setCurrentWidget(panel.widget)
|
FedericoRessi/milkyway
|
milkyway/pyside/main_window.py
|
Python
|
gpl-3.0
| 3,439
|
[
"Galaxy"
] |
ce37952d98b39847d032a6e6704fe1117fed47a909079d72face2822bc1f975a
|
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-02-12 17:38:51
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-06-23 13:55:06
from __future__ import print_function, division, absolute_import
|
sdss/marvin
|
tests/web/__init__.py
|
Python
|
bsd-3-clause
| 297
|
[
"Brian"
] |
ceb84eb099701a775642397422f1c4661b962b09f65e026045a5952451e88843
|
import unittest
import numpy as np
from pymatgen.analysis.nmr import ChemicalShielding, ElectricFieldGradient
from pymatgen.util.testing import PymatgenTest
class TestChemicalShieldingNotation(PymatgenTest):
def test_construction(self):
cs = ChemicalShielding(np.arange(9).reshape((3, 3)))
self.assertEqual(cs.shape, (3, 3))
cs = ChemicalShielding([1, 2, 3])
self.assertEqual(cs.shape, (3, 3))
self.assertArrayEqual(np.diag(cs), [1, 2, 3])
def test_principal_axis_system(self):
cs = ChemicalShielding([1, 2, 3])
self.assertArrayEqual(cs.principal_axis_system, cs)
cs = ChemicalShielding(np.arange(9).reshape((3, 3)))
self.assertArrayAlmostEqual(
np.diag(cs.principal_axis_system),
[-1.74596669e00, -1.53807726e-15, 1.37459667e01],
decimal=5,
)
def test_notations(self):
cs = ChemicalShielding.from_maryland_notation(195.0788, 68.1733, 0.8337)
hae1 = cs.haeberlen_values
self.assertAlmostEqual(hae1.sigma_iso, 195.0788, places=5)
self.assertAlmostEqual(hae1.delta_sigma_iso, -65.33899505250002, places=5)
self.assertAlmostEqual(hae1.zeta, -43.559330035000016, places=5)
self.assertAlmostEqual(hae1.eta, 0.13013537835511454, places=5)
meh1 = cs.mehring_values
self.assertAlmostEqual(meh1.sigma_iso, 195.0788, places=5)
self.assertAlmostEqual(meh1.sigma_11, 151.51946996499998, places=5)
self.assertAlmostEqual(meh1.sigma_22, 214.02416007, places=5)
self.assertAlmostEqual(meh1.sigma_33, 219.69276996500002, places=5)
mary1 = cs.maryland_values
self.assertAlmostEqual(mary1.sigma_iso, 195.0788, places=5)
self.assertAlmostEqual(mary1.omega, 68.1733, places=5)
self.assertAlmostEqual(mary1.kappa, 0.8337, places=5)
class TestElectricFieldGradient(PymatgenTest):
def test_construction(self):
efg = ElectricFieldGradient(np.arange(9).reshape((3, 3)))
self.assertEqual(efg.shape, (3, 3))
efg = ElectricFieldGradient([1, 2, 3])
self.assertEqual(efg.shape, (3, 3))
def test_principal_axis_system(self):
efg = ElectricFieldGradient([1, 2, 3])
self.assertArrayEqual(efg.principal_axis_system, efg)
efg = ElectricFieldGradient(np.arange(9).reshape((3, 3)))
self.assertArrayAlmostEqual(
np.diag(efg.principal_axis_system),
[-1.3484692e00, -1.1543332e-15, 1.3348469e01],
decimal=5,
)
def test_Attributes(self):
efg = ElectricFieldGradient([[11.11, 1.371, 2.652], [1.371, 3.635, -3.572], [2.652, -3.572, -14.746]])
self.assertAlmostEqual(efg.V_yy, 11.516, places=3)
self.assertAlmostEqual(efg.V_xx, 4.204, places=3)
self.assertAlmostEqual(efg.V_zz, -15.721, places=3)
self.assertAlmostEqual(efg.asymmetry, 0.465, places=3)
self.assertAlmostEqual(efg.coupling_constant("Al"), 5.573, places=3)
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/analysis/tests/test_nmr.py
|
Python
|
mit
| 3,057
|
[
"pymatgen"
] |
082981f25c57a7f55b63623cc7fbc85bf836a19aabc01e2cc6871b40b5cb0108
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / [email protected]
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact [email protected]
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# [email protected]
#
# ============================================================================
from texteditdelegate import TextEditDelegate, DocumentationMetaclass
from camelot.view.controls.editors.noteeditor import NoteEditor
class NoteDelegate(TextEditDelegate):
__metaclass__ = DocumentationMetaclass
editor = NoteEditor
|
jeroendierckx/Camelot
|
camelot/view/controls/delegates/notedelegate.py
|
Python
|
gpl-2.0
| 1,320
|
[
"VisIt"
] |
91c468b2420e88a6ba7c7ab8f84ace980ce0cda94da46fb0ae257eb1aac2c3f9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from forocacao.app.views import HomeView
urlpatterns = [
# django smart selects
url(r'^chaining/', include('smart_selects.urls')),
#url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
#url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("forocacao.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'', include("forocacao.app.urls", namespace="app")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these URLs in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
guegue/forocacao
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,421
|
[
"VisIt"
] |
5f2eadff8ac39c298f50994ee915baff2f052f997c87033cd351c2efeae3e7d3
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
from __future__ import print_function
import sys
class TextProgress:
def __init__(self):
self.nstep = 0
self.text = None
self.oldprogress = 0
self.progress = 0
self.calls = 0
def initialize(self, nstep, text=None):
self.nstep = float(nstep)
self.text = text
#sys.stdout.write("\n")
def update(self, step, text=None):
self.progress = int(step * 100 / self.nstep)
if self.progress/2 >= self.oldprogress/2 + 1 or self.text != text:
# just crossed at least one 2% step (e.g. from 39 to 41) or the text
# changed, so update the display
mystr = "\r["
prog = int(self.progress / 10)
mystr += prog * "=" + (10-prog) * "-"
mystr += "] %3i" % self.progress + "%"
if text:
mystr += " "+text
sys.stdout.write("\r" + 70 * " ")
sys.stdout.flush()
sys.stdout.write(mystr)
sys.stdout.flush()
self.oldprogress = self.progress
if self.progress >= 100 and text == "Done":
print(" ")
return
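# A minimal usage sketch (comments only) showing how a parser might drive this
# progress bar; the step counts are arbitrary:
#
#     progress = TextProgress()
#     progress.initialize(120, "Parsing")
#     for step in range(0, 121, 10):
#         progress.update(step, "Parsing")
#     progress.update(120, "Done")   # prints the trailing blank once 100% is reached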
|
ghutchis/cclib
|
src/cclib/progress/textprogress.py
|
Python
|
lgpl-2.1
| 1,618
|
[
"cclib"
] |
922d7c4acef11b2c5a4bd4c69221340aa719e51561c40e21d617efef4abac2a4
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Write Prismatic (http://prism-em.com/) input files.
"""
class Prismatic:
"""
Class to write Prismatic (http://prism-em.com/) input files.
This is designed for STEM image simulation.
"""
def __init__(self, structure, comment="Generated by pymatgen"):
"""
Args:
structure: pymatgen Structure
comment (str): comment
"""
self.structure = structure
self.comment = comment
def to_string(self):
"""
Returns: Prismatic XYZ file. This is similar to XYZ format
but has specific requirements for extra fields, headers, etc.
"""
l = self.structure.lattice
lines = [self.comment, f"{l.a} {l.b} {l.c}"]
for site in self.structure:
for sp, occu in site.species.items():
lines.append(
"{} {} {} {} {} {}".format(
sp.Z,
site.coords[0],
site.coords[1],
site.coords[2],
occu,
site.properties.get("thermal_sigma", 0),
)
)
lines.append("-1")
return "\n".join(lines)
|
vorwerkc/pymatgen
|
pymatgen/io/prismatic.py
|
Python
|
mit
| 1,339
|
[
"pymatgen"
] |
20f96cff714a28d6ec5f8cb5ebb04afecd964424d8fdf51bb32e0b6f5e52146b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from Conexion import DbPrecio_Sector
Sectores ={
"Sector_A":{
"N":["Condado del Rey","Los Andes N° 1", "Pan de Azucar","9 de Enero","Fátima","El Martillo","El Cristo"]
,"S":["Santa Clara","Villa Lucre","Colonia Las Lomas","La Pulida","Punta Fresca"]
},
"Sector_B":{
"N":["Nuevo Veranillo","San José"]
,"S":["El Crisol","El Sitio","Llano Bonito","Urb el Bosque del Hipodromo"]
},
"Sector_C":{
"N":["Centro Comercial Los Andes N°2","Don Bosco","Los Andes N°2","Samaria"]
,"S":["Altos del Hipódromo","Dorasol","Residencial Alta Vista","Residencial Altos de San Pedro","San Fernando","San Pedro","Santa Pera","Villa Venus"]
},
"Sector_D":{
"N":["Cerro Batea","San Isidro","Tinajita"]
},
"Sector_E":{
"N":[ "Colinas de Cerro Batea","Santa Librada","Templo Bahai","Valle de San Isidro"]
,"S":["Altos de Cerro Viento","Cerro Viento","Cerro Viento Rural","El Chimborazo (Juan diaz)","Las Trancas","Los Pueblos","Santa Ines"]
},
"Sector_F":{
"N":["Cerro Cocobolo","Chivo Chivo","Mano de Piedra","Santa Marta","Sonsonate","Torrijos Carter","Valle de Urracá"]
,"S":["Brisas del Golf","Ciudad Radial","El Guayabito","La Concepción","Nueva California","Pedregalito","San Antonio"]
},
"Sector_G":{
"S":["Altos de las Acacias","Don Bosco","Naranjal","Pedregal Anasal","Rana de Oro","San Joaquin","Teremar Plaza Tocumen","Villa Catalina"]
},
"Sector_H":{
"N":["Cipreses","Colonia Infantil","Versalles Campestre","Villa Zaita"]
,"S":["Villalobos (Hasta el Naranjal)"]
},
"Sector_I":{
"N":["La Cabima","Las Cumbres","Las Lajas","Lucha Franco","Nueva Libia despues del puente"]
,"S":["Club del Golf","La Bandera","Villa Adelina"]
},
"Sector_J":{
"N":["Alcalde Diaz","Ciudad Bolivar"]
,"S":["Barriada Tocumen","Ciudad Belén","Ciudad Jardin las Mañanitas","Hotel Holiday Inn","La Doña","La Siesta","Las Mañanitas","Monte Rico","Morelos"]
},
"Sector_K":{
"S":["24 de Diciembre","Altos de Tocumen","Cabuyita","Ruben Dario Paredes","Urb Dos Rios","Vista Hermosa"]
},
"Sector_L":{
"N":["Agua Buena","El Tecal","La Esmeralda","La Unión","Quebrada Ancha","San Vicente","Villa Grecia"]
}
}
def Sector_Origen(lugar):
dato = []
for area in Sectores:
# print (x)
for coordenada in Sectores[area]:
if lugar in Sectores[area][coordenada]:
dato.append(coordenada)
dato.append(area)
return dato
def Sector_Destino(lugar):
datos =Sector_Origen(lugar)
return datos
# when they are not in the same coordinate, the zone (or something else) is used
# Sector_Origen(lugar)
# Sector_Destino("El Tecal")
# lugar ="El Tecal"
# lugar2 ="24 de Diciembre"
# Sector_Origen = Sector_Origen(lugar)
# Sector_Destino = Sector_Destino(lugar2)
# camino = [Sector_Origen[1],Sector_Destino[1]]
# print lugar,Sector_Origen
# print lugar2,Sector_Destino
# if Sector_Origen[0] == Sector_Destino[0]:
# pass
# print "Precio ==> "+str(DbPrecio_Sector(Sector_Origen[0],camino))
# else:
# print "Esa Ruta no 'esta Disponible"
# for area in Sectores:
# for coordenada in Sectores[area]:
# for x in Sectores[area][coordenada]:
# print "\""+x+"\""
# # print "ActionButton:"
# # print "\ttext: \'"+x+"\'"
|
mdmirabal/Parcial2-Prog3
|
att.py
|
Python
|
mit
| 3,199
|
[
"SIESTA"
] |
94d9509c0c618266fba94087ce3ecdbe9a60a9dc4bda5719db8d9836d1d50777
|
# encoding: utf-8
# TODO: make abstract class for all models/managers
# to prevent code coping of common methods (for example _predict method)
from PyQt4.QtCore import *
import copy
import numpy as np
from processing.molusce.algorithms.dataprovider import Raster, ProviderError
from processing.molusce.algorithms.models.mlp.model import MLP, sigmoid
from processing.molusce.algorithms.models.sampler.sampler import Sampler
from processing.molusce.algorithms.models.correlation.model import DependenceCoef
class MlpManagerError(Exception):
'''Base class for exceptions in this module.'''
def __init__(self, msg):
self.msg = msg
class MlpManager(QObject):
'''This class gets the data extracted from the UI and
passes it to a multi-layer perceptron, then gets and stores the result.
'''
updateGraph = pyqtSignal(float, float) # Train error, val. error
updateMinValErr = pyqtSignal(float) # Min validation error
updateDeltaRMS = pyqtSignal(float) # Delta of RMS: min(valError) - currentValError
updateKappa = pyqtSignal(float) # Kappa value
processFinished = pyqtSignal()
processInterrupted = pyqtSignal()
logMessage = pyqtSignal(str)
errorReport = pyqtSignal(str)
rangeChanged = pyqtSignal(str, int)
updateProgress = pyqtSignal()
def __init__(self, ns=0, MLP=None):
QObject.__init__(self)
self.MLP = MLP
self.interrupted = False
self.layers = None
if self.MLP:
self.layers = self.getMlpTopology()
self.ns = ns # Neighbourhood size of training rasters.
self.data = None # Training data
self.catlist = None # List of unique output values of the output raster
self.train_error = None # Error on training set
self.val_error = None # Error on validation set
self.minValError = None # The minimum error that is achieved on the validation set
self.valKappa = 0 # Kappa on on the validation set
self.sampler = None # Sampler
# Results of the MLP prediction
self.prediction = None # Raster of the MLP prediction results
self.confidence = None # Raster of the MLP results confidence (1 = the maximum confidence, 0 = the least confidence)
self.transitionPotentials = None # Dictionary of transition potential maps: {category1: map1, category2: map2, ...}
# Outputs of the activation function for small and big numbers
self.sigmax, self.sigmin = sigmoid(100), sigmoid(-100) # Max and Min of the sigmoid function
self.sigrange = self.sigmax - self.sigmin # Range of the sigmoid
def computeMlpError(self, sample):
'''Get MLP error on the sample'''
input = np.hstack( (sample['state'], sample['factors']) )
out = self.getOutput( input )
err = ((sample['output'] - out)**2).sum()/len(out)
return err
def computePerformance(self, train_indexes, val_ind):
'''Check errors of training and validation sets
@param train_indexes Tuple that contains indexes of the first and last elements of the training set.
@param val_ind Tuple that contains indexes of the first and last elements of the validation set.
'''
train_error = 0
train_sampl = train_indexes[1] - train_indexes[0] # Count of training samples
for i in range(train_indexes[0], train_indexes[1]):
train_error = train_error + self.computeMlpError(sample = self.data[i])
self.setTrainError(train_error/train_sampl)
if val_ind:
val_error = 0
val_sampl = val_ind[1] - val_ind[0]
answers = np.ma.zeros(val_sampl)
out = np.ma.zeros(val_sampl)
for i in xrange(val_ind[0], val_ind[1]):
sample = self.data[i]
val_error = val_error + self.computeMlpError(sample = self.data[i])
input = np.hstack( (sample['state'],sample['factors']) )
output = self.getOutput(input)
out[i-val_ind[0]] = self.outCategory(output)
answers[i-val_ind[0]] = self.outCategory(sample['output'])
self.setValError(val_error/val_sampl)
depCoef = DependenceCoef(out, answers, expand=True)
self.valKappa = depCoef.kappa(mode=None)
def copyWeights(self):
'''Deep copy of the MLP weights'''
return copy.deepcopy(self.MLP.weights)
def createMlp(self, state, factors, output, hidden_layers):
'''
@param state Raster of the current state (categories) values.
@param factors List of the factor rasters (predicting variables).
@param hidden_layers List of neuron counts in hidden layers.
@param ns Neighbourhood size.
'''
if output.getBandsCount() != 1:
raise MlpManagerError('Output layer must have one band!')
input_neurons = 0
for raster in factors:
input_neurons = input_neurons+ raster.getNeighbourhoodSize(self.ns)
# state raster contains categories. We need to use n-1 dummy variables (where n = number of categories)
input_neurons = input_neurons + (len(state.getBandGradation(1))-1) * state.getNeighbourhoodSize(self.ns)
# Output category's (neuron) list and count
self.catlist = output.getBandGradation(1)
categories = len(self.catlist)
# set neuron counts in the MLP layers
self.layers = hidden_layers
self.layers.insert(0, input_neurons)
self.layers.append(categories)
self.MLP = MLP(*self.layers)
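# A worked sizing example (comments only), assuming getNeighbourhoodSize(ns)
# returns the pixel count of a square (2*ns+1)x(2*ns+1) window, which is defined
# outside this file:
#
#     ns = 1 (9-pixel window), 2 factor rasters, a 3-category state raster,
#     hidden_layers = [10]
#         input neurons  = 2*9 + (3-1)*9 = 36
#         output neurons = 3 (one per category)
#         topology       = [36, 10, 3]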
def getConfidence(self):
return self.confidence
def getInputVectLen(self):
'''Length of input data vector of the MLP'''
shape = self.getMlpTopology()
return shape[0]
def getOutput(self, input_vector):
out = self.MLP.propagate_forward( input_vector )
return out
def getOutputVectLen(self):
'''Length of input data vector of the MLP'''
shape = self.getMlpTopology()
return shape[-1]
def getOutputVector(self, val):
'''Convert a number val into vector,
for example, let self.catlist = [1, 3, 4] then
if val = 1, result = [ 1, -1, -1]
if val = 3, result = [-1, 1, -1]
if val = 4, result = [-1, -1, 1]
where -1 is minimum of the sigmoid, 1 is max of the sigmoid
'''
size = self.getOutputVectLen()
res = np.ones(size) * (self.sigmin)
ind = np.where(self.catlist==val)
res[ind] = self.sigmax
return res
def getMinValError(self):
return self.minValError
def getMlpTopology(self):
return self.MLP.shape
def getKappa(self):
return self.valKappa
def getPrediction(self, state, factors, calcTransitions=False):
self._predict(state, factors, calcTransitions)
return self.prediction
def getTrainError(self):
return self.train_error
def getTransitionPotentials(self):
return self.transitionPotentials
def getValError(self):
return self.val_error
def outCategory(self, out_vector):
# Get index of the biggest output value as the result
biggest = max(out_vector)
res = list(out_vector).index(biggest)
res = self.catlist[res]
return res
def outputConfidence(self, output, scale=True):
'''
Return the confidence (difference between the two largest values) of the MLP output.
@param output: The output of the MLP
@param scale: If True, then scale the confidence to int [0, 1, ..., 100] percent
'''
out_scl = self.scaleOutput(output, percent=scale)
out_scl.sort()
return out_scl[-1] - out_scl[-2]
def outputTransitions(self, output, scale=True):
'''
Return transition potentials of the outputs scaled to [0, 1] or to integer percent (0-100)
@param output: The output of MLP
@param scale: If True, then scale the transitions to int ([0, 1, ..., 100]) percent
'''
out_scl = self.scaleOutput(output, percent=scale)
result = {}
for r, v in enumerate(out_scl):
cat = self.catlist[r]
result[cat] = v
return result
def scaleOutput(self, output, percent=True):
'''
Scale the output to range [0,1] or 1-100
@param output: Output of a MLP
@param percent: If True, then scale the output to int [0, 1, ..., 100] percent
'''
res = 1.0 * (output - self.sigmin) / self.sigrange
if percent:
res = [ int(100 * x) for x in res]
return res
def _predict(self, state, factors, calcTransitions=False):
'''
Calculate output and confidence rasters using MLP model and input rasters
@param state Raster of the current state (categories) values.
@param factors List of the factor rasters (predicting variables).
'''
try:
self.rangeChanged.emit(self.tr("Initialize model %p%"), 1)
geodata = state.getGeodata()
rows, cols = geodata['ySize'], geodata['xSize']
for r in factors:
if not state.geoDataMatch(r):
raise MlpManagerError('Geometries of the input rasters are different!')
self.transitionPotentials = None # Reset tr.potentials if they exist
# Normalize factors before prediction:
for f in factors:
f.normalize(mode = 'mean')
predicted_band = np.zeros([rows, cols], dtype=np.uint8)
confidence_band = np.zeros([rows, cols], dtype=np.uint8)
if calcTransitions:
self.transitionPotentials = {}
for cat in self.catlist:
self.transitionPotentials[cat] = np.zeros([rows, cols], dtype=np.uint8)
self.sampler = Sampler(state, factors, ns=self.ns)
mask = state.getBand(1).mask.copy()
if mask.shape == ():
mask = np.zeros([rows, cols], dtype=np.bool)
self.updateProgress.emit()
self.rangeChanged.emit(self.tr("Prediction %p%"), rows)
for i in xrange(rows):
for j in xrange(cols):
if not mask[i,j]:
input = self.sampler.get_inputs(state, i,j)
if input is not None:
out = self.getOutput(input)
res = self.outCategory(out)
predicted_band[i, j] = res
confidence = self.outputConfidence(out)
confidence_band[i, j] = confidence
if calcTransitions:
potentials = self.outputTransitions(out)
for cat in self.catlist:
map = self.transitionPotentials[cat]
map[i, j] = potentials[cat]
else: # Input sample is incomplete => mask this pixel
mask[i, j] = True
self.updateProgress.emit()
predicted_bands = [np.ma.array(data = predicted_band, mask = mask, dtype=np.uint8)]
confidence_bands = [np.ma.array(data = confidence_band, mask = mask, dtype=np.uint8)]
self.prediction = Raster()
self.prediction.create(predicted_bands, geodata)
self.confidence = Raster()
self.confidence.create(confidence_bands, geodata)
if calcTransitions:
for cat in self.catlist:
band = [np.ma.array(data=self.transitionPotentials[cat], mask=mask, dtype=np.uint8)]
self.transitionPotentials[cat] = Raster()
self.transitionPotentials[cat].create(band, geodata)
except MemoryError:
self.errorReport.emit(self.tr("The system ran out of memory during ANN prediction"))
raise
except:
self.errorReport.emit(self.tr("An unknown error occurred during ANN prediction"))
raise
def readMlp(self):
pass
def resetErrors(self):
self.val_error = np.finfo(np.float).max
self.train_error = np.finfo(np.float).max
def resetMlp(self):
self.MLP.reset()
self.resetErrors()
def saveMlp(self):
pass
def saveSamples(self, fileName):
self.sampler.saveSamples(fileName)
def setMlpWeights(self, w):
'''Set weights of the MLP'''
self.MLP.weights = w
def setTrainingData(self, state, factors, output, shuffle=True, mode='All', samples=None):
'''
@param state Raster of the current state (categories) values.
@param factors List of the factor rasters (predicting variables).
@param output Raster that contains categories to predict.
@param shuffle Perform random shuffle.
@param mode Type of sampling method:
All Get all pixels
Random Get samples. Count of samples in the data=samples.
Stratified Undersampling of major categories and/or oversampling of minor categories.
@param samples Sample count of the training data (not used in 'All' mode).
'''
if not self.MLP:
raise MlpManagerError('You must create an MLP first!')
# Normalize factors before sampling:
for f in factors:
f.normalize(mode = 'mean')
self.sampler = Sampler(state, factors, output, self.ns)
self.sampler.setTrainingData(state=state, output=output, shuffle=shuffle, mode=mode, samples=samples)
outputVecLen = self.getOutputVectLen()
stateVecLen = self.sampler.stateVecLen
factorVectLen = self.sampler.factorVectLen
size = len(self.sampler.data)
self.data = np.zeros(size, dtype=[('coords', float, 2), ('state', float, stateVecLen), ('factors', float, factorVectLen), ('output', float, outputVecLen)])
self.data['coords'] = self.sampler.data['coords']
self.data['state'] = self.sampler.data['state']
self.data['factors'] = self.sampler.data['factors']
self.data['output'] = [self.getOutputVector(sample['output']) for sample in self.sampler.data]
def setTrainError(self, error):
self.train_error = error
def setValError(self, error):
self.val_error = error
def setEpochs(self, epochs):
self.epochs = epochs
def setValPercent(self, value=20):
self.valPercent = value
def setLRate(self, value=0.1):
self.lrate = value
def setMomentum(self, value=0.01):
self.momentum = value
def setContinueTrain(self, value=False):
self.continueTrain = value
def startTrain(self):
self.train(self.epochs, self.valPercent, self.lrate, self.momentum, self.continueTrain)
def stopTrain(self):
self.interrupted = True
def train(self, epochs, valPercent=20, lrate=0.1, momentum=0.01, continue_train=False):
'''Perform the training procedure on the MLP and save the best neural net
@param epochs Max iteration count.
@param valPercent Percent of the validation set.
@param lrate Learning rate.
@param momentum Learning momentum.
@param continue_train If False, start a new training cycle: reset the weights and the training and validation errors. If True, continue training.
'''
try:
samples_count = len(self.data)
val_sampl_count = samples_count*valPercent/100
apply_validation = True if val_sampl_count>0 else False # Use or not use validation set
train_sampl_count = samples_count - val_sampl_count
# Set first train_sampl_count as training set, the other as validation set
train_indexes = (0, train_sampl_count)
val_indexes = (train_sampl_count, samples_count) if apply_validation else None
if not continue_train: self.resetMlp()
self.minValError = self.getValError() # The minimum error that is achieved on the validation set
last_train_err = self.getTrainError()
best_weights = self.copyWeights() # The MLP weights at which the minimum validation error is achieved
self.rangeChanged.emit(self.tr("Train model %p%"), epochs)
for epoch in range(epochs):
self.trainEpoch(train_indexes, lrate, momentum)
self.computePerformance(train_indexes, val_indexes)
self.updateGraph.emit(self.getTrainError(), self.getValError())
self.updateDeltaRMS.emit(self.getMinValError() - self.getValError())
self.updateKappa.emit(self.getKappa())
QCoreApplication.processEvents()
if self.interrupted:
self.processInterrupted.emit()
break
last_train_err = self.getTrainError()
self.setTrainError(last_train_err)
if apply_validation and (self.getValError() < self.getMinValError()):
self.minValError = self.getValError()
best_weights = self.copyWeights()
self.updateMinValErr.emit(self.getMinValError())
self.updateProgress.emit()
self.setMlpWeights(best_weights)
except MemoryError:
self.errorReport.emit(self.tr("The system ran out of memory during ANN training"))
raise
except:
self.errorReport.emit(self.tr("An unknown error occurred during ANN training"))
raise
finally:
self.processFinished.emit()
def trainEpoch(self, train_indexes, lrate=0.1, momentum=0.01):
'''Perform a training epoch on the MLP
@param train_indexes Tuple of the min & max indexes of training samples in the samples data.
@param lrate Learning rate.
@param momentum Learning momentum.
'''
train_sampl = train_indexes[1] - train_indexes[0]
for i in range(train_sampl):
n = np.random.randint( *train_indexes )
sample = self.data[n]
input = np.hstack( (sample['state'],sample['factors']) )
self.getOutput( input ) # Forward propagation
self.MLP.propagate_backward( sample['output'], lrate, momentum )
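# A minimal usage sketch of the manager defined above. The class name (assumed to be
# MlpManager, since only MlpManagerError appears in this excerpt), the constructor call
# and the raster objects `state`, `factors` and `output` are assumptions drawn from the
# method signatures and docstrings, not a verified API, so the sketch stays in comments:
#
#   manager = MlpManager(ns=1)                                    # hypothetical constructor
#   manager.createMlp(state, factors, output, hidden_layers=[10])
#   manager.setTrainingData(state, factors, output, mode='All')
#   manager.setEpochs(100)
#   manager.setValPercent(20)
#   manager.setLRate(0.1)
#   manager.setMomentum(0.01)
#   manager.setContinueTrain(False)
#   manager.startTrain()
#   prediction = manager.getPrediction(state, factors, calcTransitions=True)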
| alfanugraha/LUMENS-repo | processing/molusce/algorithms/models/mlp/manager.py | Python | gpl-2.0 | 18,963 | ["NEURON"] | e11bae10a44d7906522a899fe677ab20d3cf1a448198ffd8facfb8ea61ec7cc8 |
# coding: utf-8
# Copyright © 2016 Bharadwaj Raju <[email protected]>
# Contributor: Maksudur Rahman Maateen <[email protected]>
# This file is part of TextSuggest.
# TextSuggest is free software.
# Licensed under the GNU General Public License 3
# See included LICENSE file or visit https://www.gnu.org/licenses/gpl.txt
import subprocess as sp
import re
def get_language_name():
# This function will return the language name
# Reading keyboard layout from shell command
# TODO: Add more definitions
languages = {
'bd' : 'Bangla',
'us' : 'English',
'uk' : 'English',
'gb' : 'English',
'ara': 'Arabic',
'cn' : 'Chinese',
'tw' : 'Chinese',
'de' : 'German',
'jp' : 'Japanese',
'ru' : 'Russian',
'es' : 'Spanish',
'se' : 'Swedish',
'fi' : 'Finnish',
'kr' : 'Korean',
'pk' : 'Urdu',
'fr' : 'French',
'gr' : 'Greek',
'ua' : 'Ukrainian'
}
xkb_map = sp.check_output(
['setxkbmap', '-print'],
universal_newlines=True)
for i in xkb_map.splitlines():
if 'xkb_symbols' in i:
kbd_layout = i.strip().split()
kbd_layout = kbd_layout[kbd_layout.index('include') + 1].split('+')[1]
# Sometimes some text is included in brackets, remove that
kbd_layout = re.sub(r'\(.*?\)', '', kbd_layout)
# Language will be detected by layout
if kbd_layout in languages:
return languages[kbd_layout]
else:
return 'English'
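# A minimal manual check, assuming setxkbmap is available on PATH; with an active 'us'
# layout this should print 'English':
if __name__ == '__main__':
    print(get_language_name())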
| maateen/TextSuggestBangla | languages.py | Python | gpl-3.0 | 1,440 | ["VisIt"] | 4a318ce8576227e089c2b5d07bff3260fbfb89cde84848d3bd0ddacbd52a55da |
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense,Dropout,Conv1D,MaxPooling1D,Flatten
from keras.utils import np_utils
import numpy as np
def get_cnn_model(input_feature_dim):
model = Sequential()
model.add(Conv1D(64,3,input_shape=(input_feature_dim,1),padding='same'))
model.add(Dropout(0.3))
model.add(Conv1D(32,3,padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(64, 3, padding='same'))
model.add(Dropout(0.3))
model.add(Conv1D(32, 3, padding='same'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(2,activation='softmax'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
return model
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
feature_filenames = ['YouTube_visual.csv','YouTube_vocal.csv','YouTube_acoustic.csv']
class_label_filename = 'YouTube_sentiment_label.csv'
class_labels = pd.read_csv(class_label_filename,header=None)
dataframe_list = list([])
for feature_filename in feature_filenames:
df = pd.read_csv(feature_filename,header=None)
dataframe_list.append(df.values)
combined_features = reduce(lambda x,y:np.hstack((x,y)),dataframe_list)
del dataframe_list
X = combined_features
y = class_labels.values
X = X.reshape(X.shape[0],X.shape[1],1)
y = np_utils.to_categorical(y,2)
model = get_cnn_model(X.shape[1])
model.fit(X,y,validation_split=0.1,batch_size=50,epochs=150,verbose=2)
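# The scikit-learn helpers defined above are never exercised by this script; a hedged sketch
# of how they could be evaluated on the same features is left as comments, since it assumes a
# flat label vector and an 80/20 split that the original pipeline does not perform:
#
#   X_tr, X_te, y_tr, y_te = train_test_split(combined_features, class_labels.values.ravel(),
#                                             test_size=0.2, random_state=42)
#   classifiers, names = get_ensemble_models()
#   for clf, name in zip(classifiers, names):
#       clf.fit(X_tr, y_tr)
#       print_evaluation_metrics(clf, name, X_te, y_te)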
| rupakc/Kaggle-Compendium | Multimodal Sentiment Analysis/multimodal_baseline.py | Python | mit | 3,373 | ["Gaussian"] | 05bf9b2a7320e13c8b1b7842d97ce2cb9ef089847606c3ec846eb8302297fed3 |
from eventize.events import Handler
from eventize.typing import Modifier
def is_string(event):
return isinstance(event.content, str)
def titlecase(event):
event.content = event.content.title()
class WeirdVisitor(Modifier):
def visit(self, handler):
handler.prepend([self.save_default])
def save_default(self, event):
self.default = event.content
my_visitor = WeirdVisitor()
handler = Handler(titlecase, my_visitor, condition=is_string)
# A Handler is a callable list
assert isinstance(handler, list)
assert callable(handler)
# handler contains 2 callbacks:
assert len(handler) == 2
assert titlecase in handler
assert my_visitor.save_default in handler
# it removes titlecase
handler -= titlecase
assert titlecase not in handler
# it adds titlecase
handler += titlecase
# Create event with attribute content and trigger it
event1 = handler.notify(content="a string")
assert my_visitor.default == "a string"
assert event1.content == "A String"
# if event.content is not a string, propagation is stopped
# these 2 lines are the same as notify
event2 = handler.make_event(content=1234)
handler(event2)
assert len(handler.events) == 2
assert handler.events == (event1, event2)
expected_message = "Condition '%s' for event 'Event' return False" % id(is_string)
assert event2.messages[0] == expected_message
# we remove all past events:
handler.clear_events()
assert len(handler.events) == 0
# we remove all callbacks and events:
handler.clear()
assert len(handler) == 0
is_a_name = lambda event: event.content == "a name"
# create a new subhandler with a condition:
handler.when(is_a_name).do(my_visitor.save_default).then(titlecase)
event1 = handler.notify(content="a name")
event2 = handler.notify(content="a string")
# only "a name" is titlecased
assert event1.content == "A Name"
assert event2.content == "a string"
# save_default is called only for event1:
assert my_visitor.default == "a name"
| apieum/eventize | eventize/tests/examples/subject_observer.py | Python | lgpl-3.0 | 1,938 | ["VisIt"] | 156ba4f93f2da29935837a0d4bc1d0805c699457cae38c84a36b88f965e37e28 |
#!/usr/bin/env python
#
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002 Bryce "Zooko" Wilcox-O'Hearn
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
__cvsid = '$Id: mencode_unittests.py,v 1.1 2002/06/25 03:54:57 zooko Exp $'
# Python standard library modules
import operator
import random
import string
import traceback
try:
import unittest
except:
class unittest:
class TestCase:
pass
pass
pass
# pyutil modules
import humanreadable
import memutil
# Mnet modules
from mencode import *
class Testy(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_decode_random_illformed_junk(self):
try:
mdecode(string.join(filter(lambda x: x != ':', map(chr, map(random.randrange, [0]*20, [256]*20))), ''))
raise "This shouldn't have decoded without an exception."
except MencodeError:
# Good. That was definitely ill-formed.
pass
def test_decode_other_random_illformed_junk(self):
l = random.randrange(0, 200)
s = str(l) + ':' + "x" * (l-1) # too short. Heh heh.
try:
mdecode(s)
raise "This shouldn't have decoded without an exception."
except MencodeError:
# Good. That was definitely ill-formed.
pass
def test_decode_unknown(self):
try:
decode_unknown('(())', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_unknown('((111))', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
assert decode_unknown('((0:))', 0) == (UNKNOWN_TYPE, 5)
assert decode_unknown(')', 0) == (UNKNOWN_TYPE, 0)
assert decode_unknown('1:a2:ab)', 0) == (UNKNOWN_TYPE, 7)
def test_encode_and_decode_string_with_nulls(self):
strwn = "\000\001\000"
assert mdecode(mencode(strwn)) == strwn
def test_encode_and_decode_none(self):
assert mdecode(mencode(None)) == None
def test_encode_and_decode_long(self):
assert mdecode(mencode(-23452422452342L)) == -23452422452342L
def test_encode_and_decode_int(self):
assert mdecode(mencode(2)) == 2
def test_dict_enforces_order(self):
mdecode('(4:dict(3:int1:0)(4:null)(3:int1:1)(4:null))')
try:
mdecode('(4:dict(3:int1:1)(4:null)(3:int1:0)(4:null))')
except MencodeError:
pass
def test_dict_forbids_key_repeat(self):
try:
mdecode('(4:dict(3:int1:1)(4:null)(3:int1:1)(4:null))')
except MencodeError:
pass
def test_decode_unknown_type_not_in_dict(self):
try:
mdecode('(7:garbage)')
return False
except UnknownTypeError:
pass
def test_decode_unknown_type_in_dict(self):
# I strongly disagree with this feature. It violates canonicity (which, as we all know, open up security holes), as well as being potentially confusing to debuggers and to mencode maintainers, and it is currently not needed. --Zooko 2001-06-03
assert mdecode('(4:dict(7:garbage)(3:int1:4)(4:null)(3:int1:5))') == {None: 5}
assert mdecode('(4:dict(4:null)(3:int1:5)(3:int1:4)(7:garbage))') == {None: 5}
def test_MencodeError_in_decode_unknown(self):
try:
mdecode('(4:dict(7:garbage)(2:int1:4)(4:null)(3:int1:5))')
return 0
except MencodeError:
pass
def test_decode_raw_string(self):
assert decode_raw_string('1:a', 0) == ('a', 3)
assert decode_raw_string('0:', 0) == ('', 2)
assert decode_raw_string('10:aaaaaaaaaaaaaaaaaaaaaaaaa', 0) == ('aaaaaaaaaa', 13)
assert decode_raw_string('10:', 1) == ('', 3)
try:
decode_raw_string('11:', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('01:a', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('11', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('h', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('h:', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
def test_decode_noncanonical_int(self):
try:
mdecode('(3:int2:03)')
assert False, "non canonical integer allowed '03'"
except MencodeError:
pass
try:
mdecode('(3:int2:3 )')
assert False, "non canonical integer allowed '3 '"
except MencodeError:
pass
try:
mdecode('(3:int2: 3)')
assert False, "non canonical integer allowed ' 3'"
except MencodeError:
pass
try:
mdecode('(3:int2:-0)')
assert False, "non canonical integer allowed '-0'"
except MencodeError:
pass
def test_encode_and_decode_hash_key(self):
x = {42: 3}
y = {'42': 3}
assert mdecode(mencode(x)) == x
assert mdecode(mencode(y)) == y
def test_encode_and_decode_list(self):
assert mdecode(mencode([])) == []
def test_encode_and_decode_tuple(self):
assert mdecode(mencode(())) == []
def test_encode_and_decode_dict(self):
assert mdecode(mencode({})) == {}
def test_encode_and_decode_complex_object(self):
spam = [[], 0, -3, -345234523543245234523L, {}, 'spam', None, {'a': 3}, {69: []}]
assert mencode(mdecode(mencode(spam))) == mencode(spam)
assert mdecode(mencode(spam)) == spam
def test_preencoded_thing(self):
thing = {"dirty limmerk": ["there once was a man from peru", "who set out to sail a canoe"]}
pthing = PreEncodedThing(thing)
assert len(mencode(thing)) == len(pthing)
assert mencode(pthing) == mencode(thing)
assert mdecode(mencode(thing)) == mdecode(mencode(pthing))
def test_dict_as_key(self):
try:
mdecode('(4:dict(4:dict)(4:null))')
assert False, "dict cannot be a key but it was allowed by mdecode"
except MencodeError:
return
def test_rej_dict_with_float(self):
try:
s = mencode({'foo': 0.9873})
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s)
except MencodeError, le:
try:
# print "got exce1: %s" % humanreadable.hr(le)
s2 = mencode({'foo': 0.9873})
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s2)
except MencodeError, le:
# print "got exce2: %s" % humanreadable.hr(le)
# Good! we want an exception when we try this.
return
def test_rej_float(self):
try:
s = mencode(0.9873)
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s)
except MencodeError, le:
try:
s2 = mencode(0.9873)
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s2)
except MencodeError, le:
# Good! we want an exception when we try this.
return
def test_no_leakage(self):
# Test every (other) test here for leakage! That's my cheap way to try to exercise the weird internal cases in the compiled code...
for m in dir(self.__class__):
if m[:len("test_")] == "test_":
if m != "test_no_leakage":
# print "testing for memory leak: %s" % m
self._help_test_no_leakage(getattr(self, m))
def _help_test_no_leakage(self, f):
slope = memutil.measure_mem_leakage(f, 2**7, iterspersample=2**4)
# print "slope: ", slope
if slope > 0.0001:
raise "%s leaks memory at a rate of approximately %s Python objects per invocation" % (f, slope,)
def _bench_it_mencode(n):
"""
For use with utilscripts/benchfunc.py.
"""
d = {}
for i in xrange(n):
d[i] = { i: 'spam', i + 1: 'eggs', i * 2: 'bacon'}
mencode(d)
def _bench_it_mencode_plus_mdecode(n):
"""
For use with utilscripts/benchfunc.py.
"""
d = {}
for i in xrange(n):
d[i] = { i: 'spam', i + 1: 'eggs', i * 2: 'bacon'*n}
mdecode(mencode(d))
def _profile_test_mdecode_implementation_speed():
import mojoutil
profit = mojoutil._dont_enable_if_you_want_speed_profit
profit(_real_test_mdecode_implementation_speed)
def _real_test_mdecode_implementation_speed():
import os
import time
msgpath = os.path.join(os.environ.get('HOME'), 'tmp/messages')
filenamelist = os.listdir(msgpath)
filenamelist.sort()
encoded_messages = []
sizes_list = []
for name in filenamelist:
encoded_messages.append( open(os.path.join(msgpath, name), 'rb').read() )
sizes_list.append( len(encoded_messages[-1]) )
totalbytes = reduce(lambda a,b: a+b, sizes_list)
average = totalbytes / len(sizes_list)
sizes_list.sort()
median = sizes_list[len(sizes_list)/2]
print 'read in %d messages totaling %d bytes, averaging %d bytes, median size of %d' % (len(sizes_list), totalbytes, average, median)
### 100% python speed test
print 'decoding using python implementation...'
# setup
decodersdict['string'] = decode_raw_string
# end setup
t1 = time.time()
for m in encoded_messages:
try:
mdecode(m)
except:
print '!',
t2 = time.time()
print 'done. total decoding time: %3.3f' % (t2 - t1,)
### partial C speed test
print 'decoding using partial C implementation...'
# setup
decode_raw_string = _c_mencode_help._c_decode_raw_string
decodersdict['string'] = decode_raw_string
# end setup
t1 = time.time()
for m in encoded_messages:
try:
mdecode(m)
except:
print '!',
t2 = time.time()
print 'done. total decoding time: %3.3f' % (t2 - t1,)
def _profile_test_mencode_implementation_speed():
import mojoutil
profit = mojoutil._dont_enable_if_you_want_speed_profit
profit(_real_test_mencode_implementation_speed)
def _real_test_mencode_implementation_speed():
import os
import time
msgpath = os.path.join(os.environ.get('HOME'), 'tmp/messages')
filenamelist = os.listdir(msgpath)
filenamelist.sort()
decoded_messages = []
sizes_list = []
for name in filenamelist:
encoding = open(os.path.join(msgpath, name), 'rb').read()
sizes_list.append( len(encoding) )
decoded_messages.append( mdecode(encoding) )
totalbytes = reduce(lambda a,b: a+b, sizes_list)
average = totalbytes / len(sizes_list)
sizes_list.sort()
median = sizes_list[len(sizes_list)/2]
print 'read and decoded %d messages totaling %d bytes, averaging %d bytes, median size of %d' % (len(sizes_list), totalbytes, average, median)
### 100% python speed test
print 'encoding using python implementation...'
# setup
# TODO none needed yet
# end setup
t1 = time.time()
for m in decoded_messages:
try:
mencode(m)
except:
print '!',
t2 = time.time()
print 'done. total encoding time: %3.3f' % (t2 - t1,)
def _real_test_encode_string_implementation_speed():
import os, time
ntests = 500
mlog = os.path.join(os.environ.get('MNETDIR'), 'common', 'mencode.py')
lines = open(mlog, 'r').readlines()
del(mlog)
o = StringIO()
t1 = time.time()
for i in xrange(ntests):
for line in lines:
encode_string(line, o)
o.seek(0)
t2 = time.time()
print 'done testing python impl of encode_string. total encoding time: %3.3f' % (t2 - t1,)
_c_encode_string = _c_mencode_help._c_encode_string
o = StringIO()
t1 = time.time()
for i in xrange(ntests):
for line in lines:
_c_encode_string(line, o)
o.seek(0)
t2 = time.time()
print 'done testing C impl of encode_string. total encoding time: %3.3f' % (t2 - t1,)
if __name__ == '__main__':
if hasattr(unittest, 'main'):
unittest.main()
else:
# Here's our manual implementation of unittest:
t = Testy()
for m in dir(t.__class__):
if m[:len("test_")] == "test_":
print m, "... ",
getattr(t, m)()
print
pass
| zooko/egtp | common/mencode_unittests.py | Python | agpl-3.0 | 13,498 | ["VisIt"] | 867ecb790fd290632f8197e3d8a11cf7f5cef7ad3c2300fa20bec22d37ee3e45 |
# teams.prac.py
teams_list = [{"Real Madrid" : [["Cordoba", "24.8.2014", "Home"],["Real Sociedad", "31.8.2014","Away"],
["Atletico Madrid","14.9.2014","Home"], ["Deportivo", "21.9.2014","Away"],
["Elche","24.9.2014","Home"], ["Villarreal", "28.9.2014","Away"],
["Athletic Bilbao","5.10.2014","Home"],["Levante","19.10.2014","Away"],
["Barcelona","26.10.2014","Home"],["Granada","2.11.2014","Away"],
["Rayo Vallecano","9.11.2014","Home"],["Eibar","23.11.2014","Away"],
["Malaga","30.11.2014","Away"],["Celta","7.12.2014","Home"],
["Almeria","14.12.2014","Away"],["Sevilla","21.12.2014","Home"],
["Valencia","4.1.2015","Away"],["Espanyol","11.1.2015","Home"],
["Getafe","18.1.2015","Away"],["Cordoba", "25.1.2015", "Away"],["Real Sociedad", "1.2.2015","Home"],
["Atletico Madrid","8.2.2015","Away"], ["Deportivo", "15.2.2015","Home"],
["Elche","22.2.2015","Away"], ["Villarreal", "1.3.2015","Home"],
["Athletic Bilbao","8.3.2015","Away"],["Levante","15.3.2015","Home"],
["Barcelona","22.3.2015","Away"],["Granada","5.4.2015","Home"],
["Rayo Vallecano","8.4.2015","Away"],["Eibar","12.4.2015","Home"],
["Malaga","19.4.2015","Home"],["Celta","26.4.2015","Away"],
["Almeria","29.4.2015","Home"],["Sevilla","3.5.2015","Away"],
["Valencia","10.5.2015","Home"],["Espanyol","17.5.2015","Away"],
["Getafe","24.5.2015","Home"]]},
{"Arsenal": [["Crystal Palace","16.8.2014","Home"],["Everton","23.8.2014","Away"],
["Leicester City","30.8.2014","Away"],["Manchester City","13.9.2014","Home"],
["Aston Villa","20.9.2014","Away"],["Tottenham Hotspur","27.9.2014","Home"],
["Chelsea","4.10.2014","Away"],["Hull City","18.10.2014","Home"],
["Sunderland","25.10.2014","Away"],["Burnley","1.11.2014","Home"],
["Swansea City","8.11.2014","Away"],["Manchester United","22.11.2014","Home"],
["West Bromwich Albion","29.11.2014","Away"],["Southampton","2.12.2014","Home"],
["Stoke City","6.12.2014","Away"],["Newcastle United","13.12.2014","Home"],
["Liverpool","20.12.2014","Away"],["Queens Park Rangers","26.12.2014","Home"],
["West Ham United","28.12.2014","Away"],["Southampton","1.1.2015","Away"],
["Stoke City","10.1.2015","Home"],["Manchester City","17.1.2015","Away"],
["Aston Villa","31.1.2015","Home"],["Tottenham Hotspur","7.2.2015","Away"],
["Leicester City","10.2.2015","Home"],["Crystal Palace","21.2.2015","Away"],
["Everton","28.2.2015","Home"],["Queens Park Rangers","3.3.2015","Away"],["West Ham United","14.3.2015","Home"],
["Newcastle United","21.3.2015","Away"],["Liverpool","4.4.2015","Home"],
["Burnley","11.4.2015","Away"],["Sunderland","18.4.2015","Home"],
["Chelsea","25.4.2015","Home"],["Hull City","2.5.2015","Away"],
["Swansea City","9.5.2015","Home"],["Manchester United","16.5.2015","Away"],
["West Bromwich Albion","24.5.2015","Home"]]},
{"Barcelona": [['Elche', '24.8.2014', 'Home'], ['Villarreal', '31.8.2014', 'Away'],
['Athletic Bilbao', '14.9.2014', 'Home'], ['Levante', '21.9.2014', 'Away'],
['Malaga', '24.9.2014', 'Away'], ['Granada', '28.9.2014', 'Home'],
['Rayo Vallecano', '5.10.2014', 'Away'], ['Eibar', '19.10.2014', 'Home'],
['Real Madrid', '26.10.2014', 'Away'], ['Celta Vigo', '2.11.2014', 'Home'],
['Almeria', '9.11.2014', 'Away'], ['Sevilla', '23.11.2014', 'Home'],
['Valencia', '30.11.2014', 'Away'], ['Espanyol', '7.12.2014', 'Home'],
['Getafe', '14.12.2014', 'Away'], ['Cordoba', '21.12.2014', 'Home'],
['Real Sociedad', '4.1.2015', 'Away'], ['Atletico Madrid', '11.1.2015', 'Home'],
['Deportivo', '18.1.2015', 'Away'], ['Elche', '25.1.2015', 'Away'], ['Villarreal', '1.2.2015', 'Home'],
['Athletic Bilbao', '8.2.2015', 'Away'], ['Levante', '15.2.2015', 'Home'],
['Malaga', '22.2.2015', 'Home'], ['Granada', '1.3.2015', 'Away'],
['Rayo Vallecano', '8.3.2015', 'Home'], ['Eibar', '15.3.2015', 'Away'],
['Real Madrid', '22.3.2015', 'Home'], ['Celta Vigo', '5.4.2015', 'Away'],
['Almeria', '8.4.2015', 'Home'], ['Sevilla', '12.4.2015', 'Away'], ['Valencia', '19.4.2015', 'Home'],
['Espanyol', '26.4.2015', 'Away'], ['Getafe', '29.4.2015', 'Home'], ['Cordoba', '3.5.2015', 'Away'],
['Real Sociedad', '10.5.2015', 'Home'], ['Atletico Madrid', '17.5.2015', 'Away'],
['Deportivo', '24.5.2015', 'Home']]},
{"Liverpool": [['Southampton', '16.8.2014', 'Home'], ['Manchester City', '23.8.2014', 'Away'],
['Tottenham Hotspur', '30.8.2014', 'Away'], ['Aston Villa', '13.9.2014', 'Home'],
['West Ham United', '20.9.2014', 'Away'], ['Everton', '27.9.2014', 'Home'],
['West Bromwich Albion', '04.10.2014', 'Home'], ['Queens Park Rangers', '18.10.2014', 'Away'],
['Hull City', '25.10.2014', 'Home'], ['Newcastle United', '1.11.2014', 'Away'],
['Chelsea', '8.11.2014', 'Home'], ['Crystal Palace', '22.11.2014', 'Away'],
['Stoke City', '29.11.2014', 'Home'], ['Leicester City', '2.12.2014', 'Away'],
['Sunderland', '6.12.2014', 'Home'], ['Manchester United', '13.12.2014', 'Away'],
['Arsenal', '20.12.2014', 'Home'], ['Burnley', '26.12.2014', 'Away'],
['Swansea City', '28.12.2014', 'Home'], ['Leicester City', '1.1.2015', 'Home'],
['Sunderland', '10.1.2015', 'Away'], ['Aston Villa', '17.1.2015', 'Away'],
['West Ham United', '31.1.2015', 'Home'], ['Everton', '7.2.2015', 'Away'],
['Tottenham Hotspur', '10.2.2015', 'Home'], ['Southampton', '21.2.2015', 'Away'],
['Manchester City', '28.2.2015', 'Home'], ['Burnley', '3.3.2015', 'Home'],
['Swansea City', '14.3.2015', 'Away'], ['Manchester United', '21.3.2015', 'Home'],
['Arsenal', '4.4.2015', 'Away'], ['Newcastle United', '11.4.2015', 'Home'],
['Hull City', '18.4.2015', 'Away'], ['West Bromwich Albion', '25.4.2015', 'Away'],
['Queens Park Rangers', '2.5.2015', 'Home'], ['Chelsea', '9.5.2015', 'Away'],
['Crystal Palace', '16.5.2015', 'Home'], ['Stoke City', '24.5.2015', 'Away']]},
{'Chelsea':[['Burnley', '16.8.2014', 'Away'], ['Leicester City', '23.8.2014', 'Home'],
['Everton', '30.8.2014', 'Away'], ['Swansea City', '13.9.2014', 'Home'],
['Manchester City', '20.9.2014', 'Away'], ['Aston Villa', '27.9.2014', 'Home'],
['Arsenal', '4.10.2014', 'Home'], ['Crystal Palace', '18.10.2014', 'Away'],
['Manchester United', '25.10.2014', 'Away'], ['Queens Park Rangers', '1.11.2014', 'Home'],
['Liverpool', '8.11.2014', 'Away'], ['West Bromwich Albion', '22.11.2014', 'Home'],
['Sunderland', '29.11.2014', 'Away'], ['Tottenham Hotspur', '3.12.2014', 'Home'],
['Newcastle United', '6.12.2014', 'Away'], ['Hull City', '13.12.2014', 'Home'],
['Stoke City', '20.12.2014', 'Away'], ['West Ham United', '26.12.2014', 'Home'],
['Southampton', '28.12.2014', 'Away'], ['Tottenham Hotspur', '1.1.2015', 'Away'],
['Newcastle United', '10.1.2015', 'Home'], ['Swansea City', '17.1.2015', 'Away'],
['Manchester City', '31.1.2015', 'Home'], ['Aston Villa', '7.2.2015', 'Away'],
['Everton','11.2.2015', 'Home'], ['Burnley', '21.2.2015', 'Home'],
['Leicester City', '28.2.2015', 'Away'], ['West Ham United', '3.3.2015', 'Away'],
['Southampton', '14.3.2015', 'Home'], ['Hull City', '21.3.2015', 'Away'],
['Stoke City', '4.4.2015', 'Home'], ['Queens Park Rangers', '11.4.2015', 'Away'],
['Manchester United', '18.4.2015', 'Home'], ['Arsenal', '25.4.2015', 'Away'],
['Crystal Palace', '2.5.2015', 'Home'], ['Liverpool', '9.5.2015', 'Home'],
['West Bromwich Albion', '16.5.2015', 'Away'], ['Sunderland', '24.5.2015', 'Home']]}]
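# A quick illustration of the structure above: each element of teams_list is a one-key dict
# mapping a team name to its fixture list, where every fixture is [opponent, date (d.m.yyyy), venue], e.g.
#   teams_list[0]["Real Madrid"][0]  ->  ["Cordoba", "24.8.2014", "Home"]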
| supercr7/foot-fixtures | teams_prac.py | Python | mit | 7,324 | ["CRYSTAL"] | 0888a50584edd68bbd24e0e2493eefbbcf9324ecb669e632ce1c15b2dcb2b3bf |
# Based on this example: http://www.vtk.org/Wiki/VTK/Examples/Python/Widgets/EmbedPyQt2
from PySide import QtCore, QtGui
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(603, 553)
self.centralWidget = QtGui.QWidget(MainWindow)
self.gridlayout = QtGui.QGridLayout(self.centralWidget)
self.vtkWidget = QVTKRenderWindowInteractor(self.centralWidget)
self.gridlayout.addWidget(self.vtkWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralWidget)
class SimpleVtkViewer(QtGui.QMainWindow):
"""
SimpleVtkViewer uses a VTK QVTKRenderWindowInteractor to provide interactive
rendering of VTK props in a Qt window. For keyboard and mouse interaction
instructions see https://github.com/Kitware/VTK/blob/master/Wrapping/Python/vtk/qt4/QVTKRenderWindowInteractor.py.
Note, it seems the 'a' key rather than the 'o' key activates object/actor mode
to enable interactive moving of rendered shapes.
"""
def __init__(self, parent = None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ren = vtk.vtkRenderer()
self.ui.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()
self.axes = vtk.vtkAxesActor()
self.add_actor(self.axes)
self._axes_visible = True
self.axes.SetConeRadius(0)
self.show()
self.iren.Initialize()
def add_actor(self, actor):
self.ren.AddActor(actor)
self.iren.Render()
def hide_actor(self, actor):
self.ren.RemoveActor(actor)
self.iren.Render()
def clear_view(self):
self.ren.RemoveAllViewProps()
if self._axes_visible:
self.show_axes()
def show_axes(self):
self.add_actor(self.axes)
self._axes_visible = True
def hide_axes(self):
self.hide_actor(self.axes)
self._axes_visible = False
def refresh_view(self):
self.iren.Render()
def create_test_actor():
# Create source
source = vtk.vtkSphereSource()
source.SetCenter(0, 0, 0)
source.SetRadius(5.0)
# Create a mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
# Create an actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
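# A minimal way to exercise the viewer interactively, assuming a working PySide + VTK
# installation; the guard below is a sketch built only from the classes defined above:
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    viewer = SimpleVtkViewer()
    viewer.add_actor(create_test_actor())
    sys.exit(app.exec_())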
| heartvalve/occmodel | examples/viewer/vtkviewer.py | Python | gpl-2.0 | 2,593 | ["VTK"] | 8615424d06537815633c2b7439ea749c86884df72954509ada12747510c314e8 |
# Implements auto-encoding variational Bayes.
from __future__ import absolute_import, division
from __future__ import print_function
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.norm as norm
from autograd.scipy.special import expit as sigmoid
from autograd import grad
from autograd.misc.optimizers import adam
from data import load_mnist, save_images
def diag_gaussian_log_density(x, mu, log_std):
return np.sum(norm.logpdf(x, mu, np.exp(log_std)), axis=-1)
def unpack_gaussian_params(params):
# Params of a diagonal Gaussian.
D = np.shape(params)[-1] // 2
mean, log_std = params[:, :D], params[:, D:]
return mean, log_std
def sample_diag_gaussian(mean, log_std, rs):
return rs.randn(*mean.shape) * np.exp(log_std) + mean
def bernoulli_log_density(targets, unnormalized_logprobs):
# unnormalized_logprobs are in R
# Targets must be -1 or 1
label_probabilities = -np.logaddexp(0, -unnormalized_logprobs*targets)
return np.sum(label_probabilities, axis=-1) # Sum across pixels.
def relu(x): return np.maximum(0, x)
def init_net_params(scale, layer_sizes, rs=npr.RandomState(0)):
"""Build a (weights, biases) tuples for all layers."""
return [(scale * rs.randn(m, n), # weight matrix
scale * rs.randn(n)) # bias vector
for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
def batch_normalize(activations):
mbmean = np.mean(activations, axis=0, keepdims=True)
return (activations - mbmean) / (np.std(activations, axis=0, keepdims=True) + 1)
def neural_net_predict(params, inputs):
"""Params is a list of (weights, bias) tuples.
inputs is an (N x D) matrix.
Applies batch normalization to every layer but the last."""
for W, b in params[:-1]:
outputs = batch_normalize(np.dot(inputs, W) + b) # linear transformation
inputs = relu(outputs) # nonlinear transformation
outW, outb = params[-1]
outputs = np.dot(inputs, outW) + outb
return outputs
def nn_predict_gaussian(params, inputs):
# Returns means and diagonal variances
return unpack_gaussian_params(neural_net_predict(params, inputs))
def generate_from_prior(gen_params, num_samples, noise_dim, rs):
latents = rs.randn(num_samples, noise_dim)
return sigmoid(neural_net_predict(gen_params, latents))
def p_images_given_latents(gen_params, images, latents):
preds = neural_net_predict(gen_params, latents)
return bernoulli_log_density(images, preds)
def vae_lower_bound(gen_params, rec_params, data, rs):
# We use a simple Monte Carlo estimate of the KL
# divergence from the prior.
q_means, q_log_stds = nn_predict_gaussian(rec_params, data)
latents = sample_diag_gaussian(q_means, q_log_stds, rs)
q_latents = diag_gaussian_log_density(latents, q_means, q_log_stds)
p_latents = diag_gaussian_log_density(latents, 0, 0)
likelihood = p_images_given_latents(gen_params, data, latents)
return np.mean(p_latents + likelihood - q_latents)
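# For reference, the quantity averaged above is the usual single-sample ELBO estimate
#     log p(x|z) + log p(z) - log q(z|x),   with z ~ q(z|x),
# whose expectation lower-bounds log p(x); the three terms correspond to `likelihood`,
# `p_latents` and `q_latents` computed in vae_lower_bound.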
if __name__ == '__main__':
# Model hyper-parameters
latent_dim = 10
data_dim = 784 # How many pixels in each image (28x28).
gen_layer_sizes = [latent_dim, 300, 200, data_dim]
rec_layer_sizes = [data_dim, 200, 300, latent_dim * 2]
# Training parameters
param_scale = 0.01
batch_size = 200
num_epochs = 15
step_size = 0.001
print("Loading training data...")
N, train_images, _, test_images, _ = load_mnist()
def binarise(images):
on = images > 0.5
images = images * 0 - 1
images[on] = 1.0
return images
print("Binarising training data...")
train_images = binarise(train_images)
test_images = binarise(test_images)
init_gen_params = init_net_params(param_scale, gen_layer_sizes)
init_rec_params = init_net_params(param_scale, rec_layer_sizes)
combined_init_params = (init_gen_params, init_rec_params)
num_batches = int(np.ceil(len(train_images) / batch_size))
def batch_indices(iter):
idx = iter % num_batches
return slice(idx * batch_size, (idx+1) * batch_size)
# Define training objective
seed = npr.RandomState(0)
def objective(combined_params, iter):
data_idx = batch_indices(iter)
gen_params, rec_params = combined_params
return -vae_lower_bound(gen_params, rec_params, train_images[data_idx], seed) / data_dim
# Get gradients of objective using autograd.
objective_grad = grad(objective)
print(" Epoch | Objective | Test ELBO ")
def print_perf(combined_params, iter, grad):
if iter % 10 == 0:
gen_params, rec_params = combined_params
bound = np.mean(objective(combined_params, iter))
message = "{:15}|{:20}|".format(iter//num_batches, bound)
if iter % 100 == 0:
test_bound = -vae_lower_bound(gen_params, rec_params, test_images, seed) / data_dim
message += "{:20}".format(test_bound)
print(message)
fake_data = generate_from_prior(gen_params, 20, latent_dim, seed)
save_images(fake_data, 'vae_samples.png', vmin=0, vmax=1)
# The optimizers provided can optimize lists, tuples, or dicts of parameters.
optimized_params = adam(objective_grad, combined_init_params, step_size=step_size,
num_iters=num_epochs * num_batches, callback=print_perf)
| HIPS/autograd | examples/variational_autoencoder.py | Python | mit | 5,491 | ["Gaussian"] | c5005c84e9278c4d99d4516693cdaa74da0a6eda4f537cc2313c3f9c3692d966 |
"""General plotting functions.
Functions
---------
- set_axis - Set many different axis properties at once.
- twin_axis - Easily create and set a new twin axis (like `twinx()` or `twiny()`)
- set_lim - Set limits on an axis
- set_ticks
- zoom - Zoom-in at a certain location on the given axes.
- text - Add text to figure.
- label_line - Add text to line
- legend - Add a legend to the given figure.
- color_cycle - Create a range of colors.
- colormap - Create a colormap from scalars to colors.
- cut_colormap - Select a truncated subset of the given colormap.
- color_set - Retrieve a (small) set of color-strings with hand picked values.
- set_grid - Configure the axes' grid.
- scientific_notation - Convert a scalar into a string with scientific notation.
- line_style_set - Retrieve a set of line-style specifications.
- line_label - Plot a vertical line, and give it a label outside the axes.
- skipTicks - skip some tick marks
- saveFigure - Save the given figure(s) to the given filename.
- stretchAxes - Stretch the `x` and/or `y` limits of the given axes by a scaling factor.
- unifyAxesLimits - Set limits on all given axes to match global extrema.
- _setAxis_scale -
- _setAxis_label -
- _clear_frame -
- _scale_to_log_flag -
- _clean_scale -
- _get_cmap - Retrieve a colormap with the given name if it is not already a colormap.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import logging
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
import zcode.math as zmath
import zcode.inout as zio
from zcode import utils
from zcode.plot.layout import _loc_str_to_pars, _parse_align
from zcode.plot import _PAD
__all__ = ['axis_next_color', 'color_lightness',
'figax', 'set_axis', 'twin_axis', 'set_lim', 'set_ticks', 'zoom',
'stretchAxes', 'text', 'label_line', 'legend', 'invert_color',
'color_cycle', 'get_norm',
'smap', 'color_set', 'set_grid', 'save_fig',
'skipTicks', 'saveFigure', 'scientific_notation',
'line_style_set', 'line_label', 'unify_axes_limits',
'_scale_to_log_flag',
# Deprecated
'colormap'
]
VALID_SIDES = [None, 'left', 'right', 'top', 'bottom']
_COLOR_SET = ['blue', 'red', 'green', 'purple',
'orange', 'cyan', 'brown', 'gold', 'pink',
'forestgreen', 'grey', 'olive', 'coral', 'yellow']
_COLOR_SET_XKCD = ["blue", "red", "green", "purple", "orange", "cyan",
"pink", "brown", "magenta", "amber", "slate blue",
"teal", "light blue", "lavender", "rose", "turquoise", "azure",
"lime green", "greyish", "windows blue",
"faded green", "mustard", "brick red", "dusty purple"]
_LS_DASH_BIG = 7
_LS_DASH_MED = 5
_LS_DASH_SML = 3
_LS_DOT = 1
_LINE_STYLE_SET = [
None,
(0, [_LS_DASH_BIG, 4]),
(0, [_LS_DOT, 1]),
(0, [_LS_DOT, 1, _LS_DASH_MED, 1]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DASH_MED, 1]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 1, _LS_DASH_MED, 1]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 1, _LS_DASH_MED, 1]),
(0, [_LS_DASH_MED, 2]),
(0, [_LS_DASH_SML, 1, _LS_DASH_MED, 1]),
(0, [_LS_DOT, 1, _LS_DASH_SML, 1, _LS_DASH_MED, 1]),
(0, [_LS_DASH_SML, 1]),
(0, [_LS_DOT, 1, _LS_DASH_SML, 1]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DASH_SML, 1]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 1, _LS_DASH_SML, 1]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 1, _LS_DASH_SML, 1]),
(0, [_LS_DOT, 4]),
(0, [_LS_DOT, 1, _LS_DOT, 4]),
(0, [_LS_DOT, 1, _LS_DOT, 1, _LS_DOT, 4]),
]
# Default length for lines in legend handles; in units of font-size
_HANDLE_LENGTH = 2.5
_HANDLE_PAD = 0.6
_LEGEND_COLUMN_SPACING = 1.2
_SCATTER_POINTS = 1
def axis_next_color(ax=None):
if ax is None:
ax = plt.gca()
return ax._get_lines.get_next_color()
def color_lightness(color, scale=None, reset=None):
"""Adjust the 'lightness' (of HLS) of a color.
From: https://stackoverflow.com/a/49601444/230468
"""
import colorsys
try:
col = mpl.colors.cnames[color]
except:
col = color
col = colorsys.rgb_to_hls(*mpl.colors.to_rgb(col))
if (scale is None) and (reset is None):
raise ValueError("either `scale` or `reset` must be given!")
elif (scale is not None) and (reset is not None):
raise ValueError("only one of `scale` or `reset` may be given!")
if scale is not None:
val = scale * col[1]
else:
val = reset
val = np.clip(val, 0.0, 1.0)
col = colorsys.hls_to_rgb(col[0], val, col[2])
return col
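# For example (a sketch of the behaviour implemented above): color_lightness('red', scale=0.5)
# returns an RGB tuple whose HLS lightness is half that of pure red (a darker red), while
# color_lightness('red', reset=0.9) sets the lightness to 0.9 directly (a pale red).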
def figax(figsize=[12, 6], ncols=1, nrows=1, sharex=False, sharey=False, squeeze=True, scale=None,
xscale='log', xlabel='', xlim=None,
yscale='log', ylabel='', ylim=None,
widths=None, heights=None,
left=None, bottom=None, right=None, top=None, hspace=None, wspace=None,
grid=True, **kwargs):
if scale is not None:
xscale = scale
yscale = scale
scales = [xscale, yscale]
for ii in range(2):
if scales[ii].startswith('lin'):
scales[ii] = 'linear'
xscale, yscale = scales
if (widths is not None) or (heights is not None):
gridspec_kw = dict()
if widths is not None:
gridspec_kw['width_ratios'] = widths
if heights is not None:
gridspec_kw['height_ratios'] = heights
kwargs['gridspec_kw'] = gridspec_kw
fig, axes = plt.subplots(figsize=figsize, squeeze=False, ncols=ncols, nrows=nrows,
sharex=sharex, sharey=sharey, **kwargs)
plt.subplots_adjust(
left=left, bottom=bottom, right=right, top=top, hspace=hspace, wspace=wspace)
if ylim is not None:
shape = (nrows, ncols, 2)
if np.shape(ylim) == (2,):
ylim = np.array(ylim)[np.newaxis, np.newaxis, :]
else:
shape = (nrows, ncols,)
ylim = np.broadcast_to(ylim, shape)
if xlim is not None:
shape = (nrows, ncols, 2)
if np.shape(xlim) == (2,):
xlim = np.array(xlim)[np.newaxis, np.newaxis, :]
else:
shape = (nrows, ncols)
xlim = np.broadcast_to(xlim, shape)
# _, xscale, xlabel, xlim = np.broadcast_arrays(axes, xscale, xlabel, xlim)
# _, yscale, ylabel, ylim = np.broadcast_arrays(axes, yscale, ylabel, ylim)
_, xscale, xlabel = np.broadcast_arrays(axes, xscale, xlabel)
_, yscale, ylabel = np.broadcast_arrays(axes, yscale, ylabel)
for idx, ax in np.ndenumerate(axes):
# print(idx, xscale[idx], xlabel[idx], xlim[idx])
# print(idx, yscale[idx], ylabel[idx], ylim[idx])
ax.set(xscale=xscale[idx], xlabel=xlabel[idx],
yscale=yscale[idx], ylabel=ylabel[idx])
if xlim[idx] is not None:
ax.set_xlim(xlim[idx])
if ylim[idx] is not None:
ax.set_ylim(ylim[idx])
if grid is not None:
if grid in [True, False]:
set_grid(ax, grid)
else:
set_grid(ax, **grid)
if squeeze:
axes = np.squeeze(axes)
if np.ndim(axes) == 0:
axes = axes[()]
return fig, axes
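# Typical call (a sketch; every keyword falls back to the defaults defined above):
#   fig, ax = figax(figsize=[8, 5], xscale='linear', xlabel='time', ylabel='value')
# returns a single grid-enabled axes; with ncols/nrows > 1 an array of axes is returned instead.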
def set_axis(ax, axis='x', pos=None, trans='axes', label=None, scale=None, fs=None,
thresh=None, side=None, grid=True, lim=None, invert=False, ticks=True, stretch=1.0,
**kwargs):
"""
Configure a particular axis of the given axes object.
Arguments
---------
ax : <matplotlib.axes.Axes>, base axes object to modify
axis : <str>, which axis to target {``x`` or ``y``}
color : <str>, color for the axis (see ``matplotlib.colors``)
fs : <int>, font size for labels
pos : <float>, position of axis-label/lines relative to the axes object
trans : <str>, transformation type for the axes
label : <str>, axes label (``None`` means blank)
scale : <str>, axis scale, e.g. 'log', (``None`` means default)
thresh : <float>, for 'symlog' scaling, the threshold for the linear segment
side : <str>, where to place the markings, {``left``, ``right``, ``top``, ``bottom``}
ts : <int>, tick-size (for the major ticks only)
grid : <bool>, whether grid lines should be enabled
lim : <float>[2], limits for the axis range
invert : <bool>, whether to invert this axis direction (i.e. high to low)
ticks : <bool>, whether tick labels should be drawn for this axis
stretch : <flt>,
"""
assert axis in ['x', 'y'], "``axis`` must be `x` or `y`!"
assert trans in ['axes', 'figure'], "``trans`` must be `axes` or `figure`!"
assert side in VALID_SIDES, "``side`` must be in '%s'" % (VALID_SIDES)
color = _color_from_kwargs(kwargs, pop=True)
if color is None:
color = 'k'
if len(kwargs) > 0:
raise ValueError("Additional arguments are not supported!")
# Set tick colors and font-sizes
kw = {}
if fs is not None:
kw['labelsize'] = fs
ax.tick_params(axis=axis, which='both', colors=color, **kw)
# Set tick-size only for major ticks
# ax.tick_params(axis=axis, which='major')
# Set Grid Lines
set_grid(ax, grid, axis='both')
if axis == 'x':
ax.xaxis.label.set_color(color)
offt = ax.get_xaxis().get_offset_text()
if side is None:
if pos is None:
side = 'bottom'
else:
if pos < 0.5:
side = 'bottom'
else:
side = 'top'
if pos is not None:
offt.set_y(pos)
ax.xaxis.set_label_position(side)
ax.xaxis.set_ticks_position(side)
if lim is not None:
if np.size(lim) > 2:
lim = zmath.minmax(lim)
ax.set_xlim(lim)
if invert:
ax.invert_xaxis()
if not ticks:
for tlab in ax.xaxis.get_ticklabels():
tlab.set_visible(False)
else:
ax.yaxis.label.set_color(color)
offt = ax.get_yaxis().get_offset_text()
if side is None:
if pos is None:
side = 'left'
else:
if pos < 0.5:
side = 'left'
else:
side = 'right'
if pos is not None:
offt.set_x(pos)
ax.yaxis.set_label_position(side)
ax.yaxis.set_ticks_position(side)
if lim is not None:
ax.set_ylim(lim)
if invert:
ax.invert_yaxis()
if not ticks:
for tlab in ax.yaxis.get_ticklabels():
tlab.set_visible(False)
# Set Spine colors
ax.spines[side].set_color(color)
if pos is not None:
ax.set_frame_on(True)
ax.spines[side].set_position((trans, pos))
ax.spines[side].set_visible(True)
ax.patch.set_visible(False)
# Set Axis Scaling
if scale is not None:
_setAxis_scale(ax, axis, scale, thresh=thresh)
# Set Axis Label
if label is not None:
kw = {}
if fs is not None:
kw['fs'] = fs
_setAxis_label(ax, axis, label, color=color, **kw)
if not np.isclose(stretch, 1.0):
if axis == 'x':
ax = stretchAxes(ax, xs=stretch)
elif axis == 'y':
ax = stretchAxes(ax, ys=stretch)
offt.set_color(color)
return ax
def twin_axis(ax, axis='x', pos=1.0, **kwargs):
"""
"""
if axis == 'x':
tw = ax.twinx()
setax = 'y'
store_name = "_twinx"
elif axis == 'y':
tw = ax.twiny()
setax = 'x'
store_name = "_twiny"
else:
raise RuntimeError("``axis`` must be either {`x` or `y`}!")
tw = set_axis(tw, axis=setax, pos=pos, **kwargs)
if not hasattr(ax, store_name):
setattr(ax, store_name, [tw])
else:
getattr(ax, store_name).append(tw)
return tw
def set_lim(ax, axis='y', lo=None, hi=None, data=None, range=False, at='exactly', invert=False):
"""Set the limits (range) of the given, target axis.
When only ``lo`` or only ``hi`` is specified, the default behavior is to only set that axis
limit and leave the other bound to its existing value. When ``range`` is set to `True`, then
the given axis bounds (``lo``/``hi``) are used as multipliers, i.e.
>>> Plotting.set_lim(ax, lo=0.1, range=True, at='exactly')
will set the lower bound to be `0.1` times the existing upper bound
The ``at`` keyword determines whether the given bounds are treated as limits to the bounds,
or as fixed ('exact') values, i.e.
>>> Plotting.set_lim(ax, lo=0.1, range=True, at='most')
will set the lower bound to at-'most' `0.1` times the existing upper bound. If the lower
bound is already 0.05 times the upper bound, it will not be changed.
Arguments
---------
ax : <matplotlib.axes.Axes>, base axes object to modify
axis : <str>{'x','y'}, which axis to set
lo : <scalar>, lower limit bound
hi : <scalar>, higher (upper) limit bound
data : <scalar>[N], range of data values from which to use max and min
range : <bool>, set the 'range' of the axis limits (True) or set the bounds explicitly
at : <str>{'least', 'exactly', 'most'}, how to treat the given bounds - limits or exactly
"""
AT_LEAST = 'least'
AT_MOST = 'most'
AT_EXACTLY = 'exactly'
AT_VALID = [AT_LEAST, AT_EXACTLY, AT_MOST]
assert at in AT_VALID, "``at`` must be in {'%s'}!" % (str(AT_VALID))
if axis == 'y':
get_lim = ax.get_ylim
set_lim = ax.set_ylim
elif axis == 'x':
get_lim = ax.get_xlim
set_lim = ax.set_xlim
else:
raise RuntimeError("``axis`` must be either 'x' or 'y'!")
lims = np.array(get_lim())
# Set Range/Span of Limits
if range:
if lo is not None:
if at == AT_EXACTLY:
lims[0] = lims[1]/lo
elif at == AT_LEAST:
lims[0] = np.max([lims[0], lims[0]/lo])
elif at == AT_MOST:
lims[0] = np.min([lims[0], lims[0]/lo])
elif hi is not None:
if at == AT_EXACTLY:
lims[1] = lims[1]*hi
elif at == AT_LEAST:
lims[1] = np.max([lims[1], lims[1]*hi])
elif at == AT_MOST:
lims[1] = np.min([lims[1], lims[1]*hi])
else:
raise RuntimeError("``lo`` or ``hi`` must be provided!")
# Set Limits explicitly
else:
if lo is not None:
if at == AT_EXACTLY:
lims[0] = lo
elif at == AT_LEAST:
lims[0] = np.max([lims[0], lo])
elif at == AT_MOST:
lims[0] = np.min([lims[0], lo])
else:
raise ValueError("Unrecognized `at` = '%s'" % (at))
elif data is not None:
lims[0] = np.min(data)
if hi is not None:
if at == AT_EXACTLY:
lims[1] = hi
elif at == AT_LEAST:
lims[1] = np.max([lims[1], hi])
elif at == AT_MOST:
lims[1] = np.min([lims[1], hi])
else:
raise ValueError("Unrecognized `at` = '%s'" % (at))
elif data is not None:
lims[1] = np.max(data)
# Actually set the axes limits
set_lim(lims)
if invert:
if axis == 'x':
ax.invert_xaxis()
else:
ax.invert_yaxis()
return
def set_ticks(ax, axis='y', every=2, log=True):
"""DEV
"""
if axis != 'y': raise ValueError("Only 'y' axis currently supported.")
if not log: raise ValueError("Only `log` scaling currently supported.")
ylims = np.array(ax.get_ylim())
man, exp = zmath.frexp10(ylims[0])
low = np.int(exp)
man, exp = zmath.frexp10(ylims[1])
high = np.int(exp)
vals = np.arange(low, high, every)
vals = np.power(10.0, vals)
ax.set_yticks(vals)
return
def zoom(ax, loc, axis='x', scale=2.0):
"""Zoom-in at a certain location on the given axes.
"""
# Choose functions based on target axis
if axis == 'x':
axScale = ax.get_xscale()
lim = ax.get_xlim()
set_lim = ax.set_xlim
elif axis == 'y':
axScale = ax.get_yscale()
lim = ax.get_ylim()
set_lim = ax.set_ylim
else:
raise ValueError("Unrecognized ``axis`` = '%s'!!" % (str(axis)))
lim = np.array(lim)
# Determine axis scaling
if axScale.startswith('lin'):
log = False
elif axScale.startswith('log'):
log = True
else:
raise ValueError("``axScale`` '%s' not implemented!" % (str(axScale)))
# Convert to log if appropriate
if log:
lim = np.log10(lim)
loc = np.log10(loc)
# Find new axis bounds
delta = np.diff(zmath.minmax(lim))[0]
lim = np.array([loc - (0.5/scale)*delta, loc + (0.5/scale)*delta])
# Convert back to linear if appropriate
if log: lim = np.power(10.0, lim)
set_lim(lim)
return lim
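# For example (a sketch of the behaviour implemented above): zoom(ax, loc=1e3, axis='x', scale=4.0)
# recentres the x-limits on 1e3 and shrinks their (log-)width to 1/4 of the current span,
# returning the new limits.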
def stretchAxes(ax, xs=1.0, ys=1.0):
"""
Stretch the `x` and/or `y` limits of the given axes by a scaling factor.
"""
xlog = (ax.get_xscale() == 'log')
ylog = (ax.get_yscale() == 'log')
xlims = np.array(ax.get_xlim())
ylims = np.array(ax.get_ylim())
if xlog: xlims = np.log10(xlims)
if ylog: ylims = np.log10(ylims)
xlims = [xlims[0] + 0.5*(1.0-xs)*(xlims[1]-xlims[0]),
xlims[1] + 0.5*(1.0-xs)*(xlims[0]-xlims[1])]
ylims = [ylims[0] + 0.5*(1.0-ys)*(ylims[1]-ylims[0]),
ylims[1] + 0.5*(1.0-ys)*(ylims[0]-ylims[1])]
if xlog: xlims = np.power(10.0, xlims)
if ylog: ylims = np.power(10.0, ylims)
ax.set_xlim(xlims)
ax.set_ylim(ylims)
return ax
def text(art, pstr, loc=None, x=None, y=None, halign=None, valign=None,
fs=None, trans=None, pad=None, shift=None, **kwargs):
"""Add text to figure.
Wrapper for the `matplotlib.figure.Figure.text` method.
Arguments
---------
art : `matplotlib.figure.Figure` or `matplotlib.axes.Axes` object,
pstr : str,
String to be printed.
loc : str,
String with two letters specifying the horizontal and vertical positioning of the text.
x : float,
X-position at which to draw the string, relative to the transformation given by `trans`.
y : float,
Y-position at which to draw the string, relative to the transformation given by `trans`.
halign : str, one of {'center', 'left', 'right'},
Horizontal alignment of text.
valign : str, one of {'center', 'bottom', 'top'},
Vertical alignment of text.
fs : int,
Fontsize.
trans : `matplotlib.BboxTransformTo` object, or `None`,
Transformation to use for text placement.
pad : scalar, (2,) scalar, or `None`
Padding between edges of artist and the text object.
        If two elements are given, they are interpreted as [xpad, ypad].
shift : (2,) scalar or `None`
Adjust the (x,y) position of the text by this amount.
kwargs : any,
Additional named arguments passed to `matplotlib.figure.Figure.text`.
For example, ``color='blue'``, or ``rotation=90``.
Returns
-------
txt : ``matplotlib.text.Text`` object,
Handle storing the drawn text.
"""
# if trans is None: trans = fig.transFigure
if trans is None:
trans = kwargs.pop('transform', None)
if fs is not None:
if 'size' in kwargs:
raise KeyError("Cannot provide both `fs` and `size`!")
kwargs['size'] = fs
if trans is None:
if isinstance(art, mpl.figure.Figure):
trans = art.transFigure
elif isinstance(art, mpl.axes.Axes):
trans = art.transAxes
if pad is None:
pad = _PAD
pad = np.atleast_1d(pad)
if pad.size == 1:
pad = np.concatenate([pad, pad])
# If a location string is given, convert to parameters
if loc is not None:
x, y, halign, valign = _loc_str_to_pars(
loc, x=x, y=y, halign=halign, valign=valign, pad=pad)
# Set default values
if x is None:
x = 0.5
if y is None:
y = 1 - pad[1]
if shift is not None:
x += shift[0]
y += shift[1]
halign, valign = _parse_align(halign, valign)
txt = art.text(x, y, pstr, transform=trans,
horizontalalignment=halign, verticalalignment=valign, **kwargs)
return txt
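# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `text` places a string on a Figure or Axes using either an explicit (x, y) position or a
# two-letter location code handled by `_loc_str_to_pars` (assumed here to follow the same
# convention described in `legend` below, e.g. 'tr' = top-right, 'bc' = bottom-center):
def _example_text_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    # Label in the top-right corner of the axes (axes coordinates by default)
    txt_ax = text(ax, "axes label", loc='tr', fs=12)
    # Label placed on the figure itself, near the bottom-center
    txt_fig = text(fig, "figure label", loc='bc', color='blue')
    return txt_ax, txt_fig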
'''
def label_line(ax, line, label, x=None, y=None,
color='0.5', fs=14, halign='left', scale='linear', clip_on=True,
halign_scale=1.0, rotate=True, log=None):
"""Add an annotation to the given line with appropriate placement and rotation.
Based on code from:
[How to rotate matplotlib annotation to match a line?]
(http://stackoverflow.com/a/18800233/230468)
User: [Adam](http://stackoverflow.com/users/321772/adam)
    NOTE: this doesn't work if the line's data have a non-data-unit transformation, for example
from a line created using `axvline` or `axhline`.
Arguments
---------
ax : `matplotlib.axes.Axes` object
Axes on which the label should be added.
line : `matplotlib.lines.Line2D` object
Line which is being labeled.
label : str
Text which should be drawn as the label.
...
Returns
-------
text : `matplotlib.text.Text` object
"""
xlim = np.array(ax.get_xlim())
ylim = np.array(ax.get_ylim())
xdata, ydata = line.get_data()
x1 = xdata[0]
x2 = xdata[-1]
y1 = ydata[0]
y2 = ydata[-1]
# Limit the edges to the plotted area
x1, x2 = zmath.limit([x1, x2], xlim)
y1, y2 = np.interp([x1, x2], xdata, ydata)
y1, y2 = zmath.limit([y1, y2], ylim)
x1, x2 = np.interp([y1, y2], ydata, xdata)
log_flag = _scale_to_log_flag(scale)
if halign.startswith('l'):
if x is None:
x = x1*halign_scale
halign = 'left'
elif halign.startswith('r'):
if x is None:
x = halign_scale*x2
halign = 'right'
elif halign.startswith('c'):
if x is None:
x = zmath.midpoints([x1, x2], log=log_flag)*halign_scale
halign = 'center'
else:
raise ValueError("Unrecognized `halign` = '{}'.".format(halign))
if log is not None:
log.warning("x = {}, y = {}, xdata = {}, ydata = {}".format(x, y, xdata, ydata))
# y = np.interp(x, xdata, ydata) if y is None else y
y = zmath.interp(x, xdata, ydata, xlog=log_flag, ylog=log_flag) if y is None else y
# print("y = ", y)
# Add Annotation to Text
xytext = (0, 0)
text = ax.annotate(label, xy=(x, y), xytext=xytext, textcoords='offset points',
size=fs, color=color, zorder=1, clip_on=clip_on,
horizontalalignment=halign, verticalalignment='center_baseline')
if log is not None:
log.warning("label xy = {}, xytext = {}".format((x, y), xytext))
if rotate:
sp1 = ax.transData.transform_point((x1, y1))
sp2 = ax.transData.transform_point((x2, y2))
rise = (sp2[1] - sp1[1])
run = (sp2[0] - sp1[0])
slope_degrees = np.degrees(np.arctan2(rise, run))
text.set_rotation_mode('anchor')
text.set_rotation(slope_degrees)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return text
'''
def label_line(ax, line, label, x=None, y=None, dx=0.0, dy=0.0, rotate=True, **kwargs):
"""Add an annotation to the given line with appropriate placement and rotation.
Based on code from:
[How to rotate matplotlib annotation to match a line?]
(http://stackoverflow.com/a/18800233/230468)
User: [Adam](http://stackoverflow.com/users/321772/adam)
    NOTE: this doesn't work if the line's data have a non-data-unit transformation, for example
from a line created using `axvline` or `axhline`.
Arguments
---------
ax : `matplotlib.axes.Axes` object
Axes on which the label should be added.
line : `matplotlib.lines.Line2D` object
Line which is being labeled.
label : str
Text which should be drawn as the label.
...
Returns
-------
text : `matplotlib.text.Text` object
"""
xlim = np.array(ax.get_xlim())
ylim = np.array(ax.get_ylim())
xdata, ydata = line.get_data()
x1 = xdata[0]
x2 = xdata[-1]
y1 = ydata[0]
y2 = ydata[-1]
'''
# Limit the edges to the plotted area
x1, x2 = zmath.limit([x1, x2], xlim)
y1, y2 = np.interp([x1, x2], xdata, ydata)
y1, y2 = zmath.limit([y1, y2], ylim)
x1, x2 = np.interp([y1, y2], ydata, xdata)
'''
xscale = ax.get_xscale()
yscale = ax.get_yscale()
xlog = xscale.startswith('log')
ylog = yscale.startswith('log')
if (x is None) and (y is None):
x_d = zmath.midpoints(xlim, log=xlog)
y_d = zmath.midpoints(ylim, log=ylog)
elif (y is None) and (x is not None):
# convert from axes to data
x_d, _ = ax.transAxes.transform([x, 0.0])
x_d, _ = ax.transData.inverted().transform([x_d, 0.0])
inds = np.argsort(xdata)
y_d = zmath.interp(x_d, np.array(xdata)[inds], np.array(ydata)[inds], xlog=xlog, ylog=ylog)
elif (x is None) and (y is not None):
# convert from axes to pixels
_, y_p = ax.transAxes.transform([0.0, y])
# print("axes ==> pixs :: y={:.4f} ==> {:.4f}".format(y, y_p))
# convert from pixels to data
_, y_d = ax.transData.inverted().transform([0.0, y_p])
# print("pixs ==> data :: y={:.4f} ==> {:.4f}".format(y_p, y_d))
inds = np.argsort(ydata)
x_d = zmath.interp(y_d, ydata[inds], xdata[inds], xlog=xlog, ylog=ylog)
# print("x_d = {:.4f}".format(x_d))
# print("plot_core.label_line():x_d,y_d = {}, {}".format(x_d, y_d))
if (dx is not None) and (not np.isclose(dx, 0.0)):
# data to pixels
x_p, _ = ax.transData.transform([x_d, 0.0])
# pixels to axes
x_a, _ = ax.transAxes.inverted().transform([x_p, 0.0])
x_a += dx
# axes to pixels
x_p, _ = ax.transAxes.transform([x_a, 0.0])
# pixels to data
x_d, _ = ax.transData.inverted().transform([x_p, 0.0])
if (dy is not None) and (not np.isclose(dy, 0.0)):
# data to pixels
_, y_p = ax.transData.transform([0.0, y_d])
# pixels to axes
_, y_a = ax.transAxes.inverted().transform([0.0, y_p])
y_a += dy
# axes to pixels
_, y_p = ax.transAxes.transform([0.0, y_a])
# pixels to data
_, y_d = ax.transData.inverted().transform([0.0, y_p])
# Add Annotation to Text
# xytext = (0, 0)
xy = (x_d, y_d)
# print("plot_core.label_line():x_d,y_d = {}, {}".format(x_d, y_d), xy)
text = ax.annotate(label, xy=xy, xycoords='data', **kwargs)
# horizontalalignment=halign, verticalalignment='center_baseline')
# xytext=xytext, textcoords='offset points',
if rotate is True:
# sp1 = ax.transData.transform_point((x1, y1))
# sp2 = ax.transData.transform_point((x2, y2))
sp1 = ax.transData.transform((x1, y1))
sp2 = ax.transData.transform((x2, y2))
# print(sp1, sp2)
# sp1 = [x1, y1]
# sp2 = [x2, y2]
rise = (sp2[1] - sp1[1])
run = (sp2[0] - sp1[0])
rotate = np.degrees(np.arctan2(rise, run))
if (rotate is not False) and (rotate is not None):
text.set_rotation_mode('anchor')
text.set_rotation(rotate)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return text
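# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `label_line` annotates a plotted line directly on the axes, optionally rotating the text to
# match the on-screen slope of the line.  `x`/`y` are given in axes-fraction coordinates and
# `dx`/`dy` nudge the label in axes units.  A minimal sketch:
def _example_label_line_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    xx = np.linspace(0.0, 10.0, 50)
    fig, ax = plt.subplots()
    line, = ax.plot(xx, 2.0*xx + 1.0, 'k-')
    # Place the label 60% of the way across the x-axis, slightly above the line
    txt = label_line(ax, line, "y = 2x + 1", x=0.6, dy=0.02, fontsize=10)
    return txt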
def legend(art, keys, names, x=None, y=None, halign='right', valign='center',
fs=None, trans=None, prev=None,
fs_title=None, loc=None, mono=False, zorder=None, align_title=None, **kwargs):
"""Add a legend to the given figure.
Wrapper for the `matplotlib.pyplot.Legend` method.
Arguments
---------
    art : `matplotlib.figure.Figure` or `matplotlib.axes.Axes` object,
keys : array_like of artists, shape (N,)
Handles to the legend artists to be included in the legend.
names : array_like of str, shape (N,)
Names corresponding to each legend artist in `keys`.
x : float,
X-position at which to draw the legend, relative to the transformation given by `trans`.
y : float,
Y-position at which to draw the legend, relative to the transformation given by `trans`.
halign : str, one of {'center', 'left', 'right'},
Horizontal alignment of legend box.
valign : str, one of {'center', 'lower', 'upper'},
Vertical alignment of legend box.
fs : int,
Fontsize.
trans : `matplotlib.BboxTransformTo` object, or `None`,
Transformation to use for legend placement.
If `None`, then it defaults to `transFigure` or `transAxes` if `art` is a 'Figure' or 'Axes'
respectively.
fs_title : int,
loc : str or 'None',
Describe the location of the legend using a string, e.g. 'tl', 'br', 'cl', 'tc'
The string must be a two letter combination, such that:
        - First letter determines the vertical alignment {'t', 'b', 'c'};
- Second letter the horizontal, {'l', 'r', 'c'}.
mono : bool,
Use a monospace font for the legend strings.
kwargs : any,
Additional named arguments passed to `matplotlib.pyplot.legend`.
For example, ``ncol=1`` or ``title='Legend Title'``.
Returns
-------
leg : ``matplotlib.legend.Legend`` object,
Handle storing the drawn legend.
"""
if isinstance(art, mpl.figure.Figure):
ax = art.axes[0]
if trans is None:
trans = art.transFigure
elif isinstance(art, mpl.axes.Axes):
ax = art
if trans is None:
trans = ax.transAxes
else:
ax = art
warnings.warn("Unexpected `art` object '{}' (type: {})".format(art, type(art)))
kwargs.setdefault('handlelength', _HANDLE_LENGTH)
kwargs.setdefault('handletextpad', _HANDLE_PAD)
kwargs.setdefault('columnspacing', _LEGEND_COLUMN_SPACING)
kwargs.setdefault('scatterpoints', _SCATTER_POINTS)
kwargs.setdefault('numpoints', _SCATTER_POINTS)
kwargs.setdefault('fancybox', True)
# `alpha` should actually be `framealpha`
if 'alpha' in kwargs:
warnings.warn("For legends, use `framealpha` instead of `alpha`.")
kwargs['framealpha'] = kwargs.pop('alpha')
# Override alignment using `loc` argument
if loc is not None:
_x, _y, halign, valign = _loc_str_to_pars(loc)
else:
_x = 0.99
_y = 0.5
if valign == 'top':
valign = 'upper'
if valign == 'bottom':
valign = 'lower'
if x is None:
x = _x
if y is None:
y = _y
alignStr = valign
if not (valign == 'center' and halign == 'center'):
alignStr += " " + halign
prop_dict = {}
if fs is not None:
prop_dict['size'] = fs
if mono:
prop_dict['family'] = 'monospace'
leg = ax.legend(keys, names, prop=prop_dict, # fancybox=True,
loc=alignStr, bbox_transform=trans, bbox_to_anchor=(x, y), **kwargs)
if fs_title is not None:
plt.setp(leg.get_title(), fontsize=fs_title)
if align_title is not None:
plt.setp(leg.get_title(), multialignment=align_title)
if zorder is not None:
        leg.set_zorder(zorder)
if prev is not None:
prev = np.atleast_1d(prev)
for pp in prev:
ax.add_artist(pp)
return leg
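# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `legend` wraps `Axes.legend`, placing the box with a two-letter `loc` code (or explicit x/y in
# the transform given by `trans`) instead of matplotlib's own location strings.  A minimal sketch:
def _example_legend_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    xx = np.linspace(0.0, 1.0, 20)
    fig, ax = plt.subplots()
    h1, = ax.plot(xx, xx, 'b-')
    h2, = ax.plot(xx, xx**2, 'r--')
    # Put the legend in the top-left corner of the axes, with a slightly smaller font
    leg = legend(ax, [h1, h2], ["linear", "quadratic"], loc='tl', fs=10)
    return leg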
def unify_axes_limits(axes, axis='y'):
"""Given a list of axes, set all limits to match global extrema.
"""
assert axis in ['x', 'y'], "``axis`` must be either 'x' or 'y' !!"
if axis == 'y':
lims = np.array([ax.get_ylim() for ax in axes])
else:
lims = np.array([ax.get_xlim() for ax in axes])
lo = np.min(lims[:, 0])
hi = np.max(lims[:, 1])
for ax in axes:
if axis == 'y':
ax.set_ylim([lo, hi])
else:
ax.set_xlim([lo, hi])
return np.array([lo, hi])
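# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `unify_axes_limits` finds the global extrema over a set of axes and applies them to every
# panel, which is handy for side-by-side comparison plots.  A minimal sketch:
def _example_unify_axes_limits_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(1, 3)
    for ii, ax in enumerate(axes):
        ax.plot(np.random.uniform(0.0, 1.0 + ii, 25))
    # Force all three panels onto the same y-range
    lo_hi = unify_axes_limits(axes, axis='y')
    return lo_hi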
def color_cycle(num, ax=None, color=None, cmap=plt.cm.Spectral,
left=0.1, right=0.9, light=True):
"""Create a range of colors.
Arguments
---------
num : int
Number of colors to put in cycle.
ax : ``matplotlib.axes.Axes`` object or `None`
Axes on which to set the colors. If given, then subsequent calls to ``ax.plot`` will use
the different colors of the color-cycle. If `None`, then the created colorcycle is only
returned.
cmap : ``matplotlib.colors.Colormap`` object
Colormap from which to select colors.
left : float {0.0, 1.0}
Start colors this fraction of the way into the colormap (to avoid black/white).
right : float {0.0, 1.0}
Stop colors at this fraction of the way through the colormap (to avoid black/white).
light : bool
If `color` is given instead of `cmap`, use a seaborn 'light' colormap (vs. 'dark').
Note: only works if `color` is given.
Returns
-------
cols : (`num`,) array_like of RGBA color tuples
Colors forming the color cycle.
"""
nums = np.linspace(left, right, num)
# If a single color is not provided, use a colormap (`cmap`)
if color is None:
cmap = _get_cmap(cmap)
# If a single color is provided, create a cycle by altering its `a[lpha]`
else:
if isinstance(color, six.string_types):
cc = mpl.colors.ColorConverter()
color = cc.to_rgba(color)
if np.size(color) == 3:
color = np.append(color, 1.0)
if np.size(color) != 4:
raise ValueError("`color` = '{}', must be a RGBA series.".format(color))
if light:
palette = sns.light_palette
else:
palette = sns.dark_palette
cmap = palette(color, n_colors=num, as_cmap=True)
cols = [cmap(it) for it in nums]
if ax is not None:
        ax.set_prop_cycle(color=cols[::-1])
return cols
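# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `color_cycle` samples `num` colors from a colormap (or, when `color` is given, from a
# seaborn light/dark palette built around that single color), optionally installing them as
# the axes' property cycle.  A minimal sketch using the colormap mode:
def _example_color_cycle_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cols = color_cycle(5, ax=ax)           # five colors from the default colormap
    for ii in range(5):
        ax.plot(np.arange(10) * (ii + 1))  # each line picks up the next cycle color
    return cols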
def invert_color(col):
rgba = mpl.colors.to_rgba(col)
alpha = rgba[-1]
col = 1.0 - np.array(rgba[:-1])
col = tuple(col.tolist() + [alpha])
return col
def smap(args=[0.0, 1.0], cmap=None, scale=None, norm=None, midpoint=None,
under='0.8', over='0.8', left=None, right=None, filter=None):
"""Create a colormap from a scalar range to a set of colors.
Arguments
---------
args : scalar or array_like of scalar
Range of valid scalar values to normalize with
cmap : None, str, or ``matplotlib.colors.Colormap`` object
Colormap to use.
scale : str or `None`
Scaling specification of colormap {'lin', 'log', `None`}.
If `None`, scaling is inferred based on input `args`.
norm : None or `matplotlib.colors.Normalize`
Normalization to use.
under : str or `None`
Color specification for values below range.
over : str or `None`
Color specification for values above range.
left : float {0.0, 1.0} or `None`
Truncate the left edge of the colormap to this value.
If `None`, 0.0 used (if `right` is provided).
right : float {0.0, 1.0} or `None`
Truncate the right edge of the colormap to this value
If `None`, 1.0 used (if `left` is provided).
Returns
-------
smap : ``matplotlib.cm.ScalarMappable``
Scalar mappable object which contains the members:
`norm`, `cmap`, and the function `to_rgba`.
Notes
-----
- Truncation:
- If neither `left` nor `right` is given, no truncation is performed.
- If only one is given, the other is set to the extreme value: 0.0 or 1.0.
"""
args = np.asarray(args)
if scale is None:
if np.size(args) > 1 and np.all(args > 0.0):
scale = 'log'
else:
scale = 'lin'
log = _scale_to_log_flag(scale)
if not isinstance(cmap, mpl.colors.Colormap):
if cmap is None:
# cmap = 'viridis'
cmap = 'Spectral'
if isinstance(cmap, six.string_types):
import copy
cmap = copy.copy(plt.get_cmap(cmap))
# Select a truncated subsection of the colormap
if (left is not None) or (right is not None):
if left is None:
left = 0.0
if right is None:
right = 1.0
cmap = cut_colormap(cmap, left, right)
if under is not None:
cmap.set_under(under)
if over is not None:
cmap.set_over(over)
if norm is None:
norm = get_norm(args, midpoint=midpoint, log=log, filter=filter)
# Create scalar-mappable
smap = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    # Set a dummy (empty) data array so that a colorbar can be drawn from this ScalarMappable
smap._A = []
# Store type of mapping
smap.log = log
return smap
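# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `smap` builds a `ScalarMappable` that converts scalar values into RGBA colors, choosing log
# or linear normalization automatically from the input range; the returned object can also be
# fed directly to `Figure.colorbar`.  A minimal sketch:
def _example_smap_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    vals = np.logspace(-2.0, 3.0, 40)
    sm = smap(vals, cmap='viridis')        # strictly-positive input ==> log normalization
    fig, ax = plt.subplots()
    for xx, vv in enumerate(vals):
        ax.scatter(xx, vv, color=sm.to_rgba(vv))
    fig.colorbar(sm, ax=ax)
    return sm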
def cut_colormap(cmap, min=0.0, max=1.0, n=100):
"""Select a truncated subset of the given colormap.
Code from: http://stackoverflow.com/a/18926541/230468
Arguments
---------
cmap : `matplotlib.colors.Colormap`
Colormap to truncate
min : float, {0.0, 1.0}
Minimum edge of the colormap
max : float, {0.0, 1.0}
Maximum edge of the colormap
n : int
Number of points to use for sampling
Returns
-------
new_cmap : `matplotlib.colors.Colormap`
Truncated colormap.
"""
name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=min, b=max)
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
name, cmap(np.linspace(min, max, n)))
return new_cmap
def color_set(num, black=False, cset='xkcd'):
"""Retrieve a (small) set of color-strings with hand picked values.
Arguments
---------
num : int
Number of colors to retrieve.
black : bool
Include 'black' as the first color.
cset : str, {'xkcd', 'def'}
Which set of colors to choose from.
Returns
-------
cols : (`num`) list of str or RGBA tuples
List of `matplotlib` compatible color-strings or tuples.
"""
if cset == 'xkcd':
colors = list(_COLOR_SET_XKCD)
colors = sns.xkcd_palette(colors)
elif cset.startswith('def'):
colors = list(_COLOR_SET)
else:
raise ValueError("`cset` '{}' unrecognized.".format(cset))
if black:
colors = ['black'] + colors
ncol = len(colors)
# If more colors are requested than are available, fallback to `color_cycle`
if num > ncol:
# raise ValueError("Limited to {} colors, cannot produce `num` = '{}'.".format(ncol, num))
colors = color_cycle(num)
return colors
return colors[:num]
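# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `color_set` returns a small, hand-picked palette, falling back to `color_cycle` when more
# colors are requested than the set contains.  Assumes the module-level color sets hold at
# least a handful of entries:
def _example_color_set_usage():
    cols = color_set(4)                  # four colors from the xkcd set
    cols_bk = color_set(3, black=True)   # 'black' plus two more
    return cols, cols_bk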
def line_style_set(num, solid=True):
"""Retrieve a (small) set of line-style specifications with hand constructed patterns.
Used by the `matplotlib.lines.Line2D.set_dashes` method.
The first element is a solid line.
Arguments
---------
num : int or `None`
Number of line-styles to retrieve.
If `None`, then all available are returned.
solid : bool
Include solid line-style.
Returns
-------
lines : (`num`) list of tuples,
Set of line-styles. Each line style is a tuple of values specifying dash spacings.
"""
_lines = list(_LINE_STYLE_SET)
# Remove solid line specification if undesired
if not solid:
_lines = _lines[1:]
nline = len(_lines)
# If more colors are requested than are available, fallback to `color_cycle`
if (num is not None) and (num > nline):
raise ValueError("Limited to {} line-styles.".format(nline))
lines = [ll for ll in _lines[:num]]
return lines
def set_grid(ax, val=True, axis='both', ls='-', clear=True,
below=True, major=True, minor=True, zorder=2, alpha=None, **kwargs):
"""Configure the axes' grid.
"""
color = _color_from_kwargs(kwargs)
if clear:
ax.grid(False, which='both', axis='both')
ax.set_axisbelow(below)
if val:
if major:
if color is None:
_col = '0.60'
else:
_col = color
if alpha is None:
_alpha = 0.4
else:
_alpha = alpha
ax.grid(True, which='major', axis=axis, c=_col, ls=ls, zorder=zorder, alpha=_alpha)
if minor:
if color is None:
_col = '0.85'
else:
_col = color
if alpha is None:
_alpha = 0.2
else:
_alpha = alpha
ax.grid(True, which='minor', axis=axis, c=_col, ls=ls, zorder=zorder, alpha=_alpha)
return
def save_fig(fig, fname, path=None, subdir=None, modify=True, verbose=True, **kwargs):
pp = path if (path is not None) else os.path.curdir
if subdir is not None:
pp = os.path.join(pp, subdir, "")
pp = zio.check_path(pp)
ff = os.path.join(pp, fname)
if modify:
ff = zio.modify_exists(ff)
ff = os.path.abspath(ff)
kwargs.setdefault('dpi', 200)
fig.savefig(ff, **kwargs)
if verbose:
print("Saved to '{}' size: {}".format(ff, zio.get_file_size(ff)))
return ff
def skipTicks(ax, axis='y', skip=2, num=None, first=None, last=None):
"""
Only label every ``skip`` tick marks.
Arguments
---------
ax <obj> : `matplotlib.axes.Axes` object, base axes class
axis <str> : which axis to modify
skip <int> : interval which to skip
    num <int> : target number of tick labels (``None`` : use a fixed ``skip``)
first <bool> : If `True` always show first tick, if `False` never show, otherwise use skip
last <bool> : If `True` always show last tick, if `False` never show, otherwise use skip
"""
# Get the correct labels
if axis == 'y': ax_labels = ax.yaxis.get_ticklabels()
    elif axis == 'x': ax_labels = ax.xaxis.get_ticklabels()
else: raise RuntimeError("Unrecognized ``axis`` = '%s'!!" % (axis))
count = len(ax_labels)
# Determine ``skip`` to match target number of labels
    if num is not None: skip = int(np.ceil(1.0*count/num))
visible = np.zeros(count, dtype=bool)
# Choose some to be visible
visible[::skip] = True
if first is True: visible[0] = True
elif first is False: visible[0] = False
if last is True: visible[-1] = True
elif last is False: visible[-1] = False
for label, vis in zip(ax_labels, visible): label.set_visible(vis)
return
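# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `skipTicks` hides a fraction of the tick labels on a crowded axis, either with a fixed
# stride (`skip`) or by targeting roughly `num` visible labels.  A minimal sketch:
def _example_skipTicks_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(np.arange(100))
    # Show only every third y tick label, but always keep the first and last
    skipTicks(ax, axis='y', skip=3, first=True, last=True)
    return ax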
def saveFigure(fig, fname, verbose=True, log=None, level=logging.WARNING, close=True, **kwargs):
"""Save the given figure(s) to the given filename.
If ``fig`` is iterable, a multipage pdf is created. Otherwise a single file is made.
Does *not* make sure path exists.
Arguments
---------
fig <obj>([N]) : one or multiple ``matplotlib.figure.Figure`` objects.
fname <str> : filename to save to.
verbose <bool> : print verbose output to stdout
log <obj> : ``logging.Logger`` object to print output to
level <int> :
close <bool> : close figures after saving
    **kwargs <dict> : additional arguments passed to ``savefig()``.
"""
# CATCH WRONG ORDER OF ARGUMENTS
if type(fig) == str:
warnings.warn("FIRST ARGUMENT SHOULD BE `fig`!!")
temp = str(fig)
fig = fname
fname = temp
if log is not None: log.debug("Saving figure...")
if not np.iterable(fig): fig = [fig]
saved_names = []
# Save as multipage PDF
if fname.endswith('pdf') and np.size(fig) > 1:
from matplotlib.backends.backend_pdf import PdfPages
with PdfPages(fname) as pdf:
for ff in fig:
pdf.savefig(figure=ff, **kwargs)
if close: plt.close(ff)
# Make sure file now exists
if os.path.exists(fname):
saved_names.append(fname)
else:
raise RuntimeError("Figure '{}' did not save.".format(fname))
else:
# Save each figure to a different file
for ii, ff in enumerate(fig):
# On subsequent figures, append the number to the filename
if ii == 0:
usefname = str(fname)
else:
usefname = zio.modify_filename(fname, append='_%d' % (ii))
ff.savefig(usefname, **kwargs)
if close: plt.close(ff)
if os.path.exists(usefname):
saved_names.append(usefname)
else:
raise RuntimeError("Figure '{}' did not save.".format(usefname))
# No files saved or Some files were not saved
if not len(saved_names) or len(saved_names) != len(fig):
warn_str = "Error saving figures..."
if log is None:
warnings.warn(warn_str)
else:
log.warning(warn_str)
# Things look good.
else:
printStr = "Saved figure to '%s'" % (fname)
if log is not None:
log.log(level, printStr)
elif verbose:
print(printStr)
return
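# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `saveFigure` writes one figure to a single file, or a list of figures either to a multipage
# PDF (when the name ends in 'pdf') or to numbered files.  The filename below is an arbitrary
# example:
def _example_saveFigure_usage():
    import matplotlib.pyplot as plt
    figs = []
    for ii in range(3):
        fig, ax = plt.subplots()
        ax.plot([0, 1], [0, ii])
        figs.append(fig)
    # All three figures end up as pages of a single PDF in the current directory
    saveFigure(figs, "example_pages.pdf", verbose=False)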
def scientific_notation(val, man=0, exp=0, dollar=True, one=True, zero=False, sign=False):
"""Convert a scalar into a string with scientific notation (latex formatted).
Arguments
---------
val : scalar
Numerical value to convert.
man : int or `None`
Precision of the mantissa (decimal points); or `None` for omit mantissa.
exp : int or `None`
Precision of the exponent (decimal points); or `None` for omit exponent.
dollar : bool
Include dollar-signs ('$') around returned expression.
one : bool
Include the mantissa even if it is '1[.0...]'.
zero : bool
If the value is uniformly '0.0', write it as such (instead of 10^-inf).
sign : bool
Include the sign (i.e. '+') on the mantissa even when positive.
Returns
-------
notStr : str
Scientific notation string using latex formatting.
"""
if zero and val == 0.0:
notStr = "$"*dollar + "0.0" + "$"*dollar
return notStr
val_man, val_exp = zmath.frexp10(val)
use_man = (man is not None or not np.isfinite(val_exp))
val_man = np.around(val_man, man)
if val_man >= 10.0:
val_man /= 10.0
val_exp += 1
# Construct Mantissa String
# --------------------------------
if use_man:
_sign = '+' if sign else ''
str_man = "{0:{2}.{1:d}f}".format(val_man, man, _sign)
else:
str_man = ""
# If the mantissa is '1' (or '1.0' or '1.00' etc), dont write it
if not one and str_man == "{0:.{1:d}f}".format(1.0, man):
str_man = ""
# Construct Exponent String
# --------------------------------
if (exp is not None) and np.isfinite(val_exp):
# Try to convert `val_exp` to integer, fails if 'inf' or 'nan'
        try:
            val_exp = int(val_exp)
            str_exp = "10^{{ {:d} }}".format(val_exp)
        except (TypeError, ValueError, OverflowError):
            str_exp = "10^{{ {0:.{1:d}f} }}".format(val_exp, exp)
# Add negative sign if needed
if not use_man and (val_man < 0.0 or val == -np.inf):
str_exp = "-" + str_exp
else:
str_exp = ""
# Put them together
# --------------------------------
notStr = "$"*dollar + str_man
if len(str_man) and len(str_exp):
notStr += " \\times"
notStr += str_exp + "$"*dollar
return notStr
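# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `scientific_notation` turns a scalar into a latex-formatted string, with separate precision
# controls for the mantissa and the exponent.  A minimal sketch:
def _example_scientific_notation_usage():
    s1 = scientific_notation(2500.0, man=1)          # e.g. "$2.5 \times10^{ 3 }$"
    s2 = scientific_notation(0.0031, man=2, exp=0)   # e.g. "$3.10 \times10^{ -3 }$"
    return s1, s2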
def line_label(ax, pos, label, dir='v', loc='top', xx=None, yy=None, ha=None, va=None,
line_kwargs={}, text_kwargs={}, dashes=None, rot=None):
"""Plot a vertical line, and give it a label outside the axes.
Arguments
---------
ax : `matplotlib.axes.Axes` object
Axes on which to plot.
    pos : float
        Location (in data coordinates) at which to place the line.
    label : str
        Label to place next to the line.
    loc : str
        Side of the axes on which to place the label: one of {'top', 'bottom', 'left', 'right'}.
line_kwargs : dict
Additional parameters for the line, passed to `ax.axvline`.
text_kwargs : dict
Additional parameters for the text, passed to `plot_core.text`.
dashes : array_like of float or `None`
Specification for dash/dots pattern for the line, passed to `set_dashes`.
Returns
-------
ll : `matplotlib.lines.Line2D`
Added line object.
txt : `matplotlib.text.Text`
Added text object.
"""
tdir = dir.lower()[:1]
if tdir.startswith('v'): VERT = True
elif tdir.startswith('h'): VERT = False
else: raise ValueError("`dir` ('{}') must start with {{'v', 'h'}}".format(dir))
tloc = loc.lower()[:1]
valid_locs = ['t', 'b', 'l', 'r']
if tloc not in valid_locs:
raise ValueError("`loc` ('{}') must start with '{}'".format(loc, valid_locs))
# Set default rotation
if rot is None:
rot = 0
# If to 'l'eft or 'r'ight, rotate 90-degrees
if tloc.startswith('l'): rot = 90
elif tloc.startswith('r'): rot = -90
# Set alignment
if tloc.startswith('l'):
_ha = 'right'
_va = 'center'
elif tloc.startswith('r'):
_ha = 'left'
_va = 'center'
elif tloc.startswith('t'):
_ha = 'center'
_va = 'bottom'
elif tloc.startswith('b'):
_ha = 'center'
_va = 'top'
if ha is None: ha = _ha
if va is None: va = _va
# Add vertical line
if VERT:
ll = ax.axvline(pos, **line_kwargs)
trans = mpl.transforms.blended_transform_factory(ax.transData, ax.transAxes)
if tloc.startswith('l'):
_xx = pos
_yy = 0.5
elif tloc.startswith('r'):
_xx = pos
_yy = 0.5
elif tloc.startswith('t'):
_xx = pos
_yy = 1.0 + _PAD
elif tloc.startswith('b'):
_xx = pos
_yy = 0.0 - _PAD
# Add horizontal line
else:
ll = ax.axhline(pos, **line_kwargs)
trans = mpl.transforms.blended_transform_factory(ax.transAxes, ax.transData)
if tloc.startswith('l'):
_xx = 0.0 - _PAD
_yy = pos
elif tloc.startswith('r'):
_xx = 1.0 + _PAD
_yy = pos
elif tloc.startswith('t'):
_xx = 0.5
_yy = pos
elif tloc.startswith('b'):
_xx = 0.5
_yy = pos
if xx is None: xx = _xx
if yy is None: yy = _yy
if dashes: ll.set_dashes(dashes)
txt = text(ax, label, x=xx, y=yy, halign=ha, valign=va, trans=trans, **text_kwargs)
return ll, txt
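# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `line_label` draws an axvline/axhline at `pos` and writes its label just outside the axes
# (or beside them for 'left'/'right' placements), using the blended transforms set up above.
# A minimal sketch:
def _example_line_label_usage():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(np.linspace(0.0, 10.0, 50), np.random.normal(size=50))
    # Mark x = 5 with a dashed vertical line, labeled above the axes
    ll, txt = line_label(ax, 5.0, "transition", dir='v', loc='top',
                         line_kwargs=dict(color='0.5', ls='--'))
    return ll, txt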
def get_norm(data, midpoint=None, log=False, filter=None):
"""
"""
if (filter is None) and log:
filter = 'g'
# Determine minimum and maximum
    if np.size(data) > 1:
        rv = zmath.minmax(data, filter=filter)
        if rv is None:
            vmin, vmax = 0.0, 0.0
        else:
            vmin, vmax = rv
    elif np.size(data) == 1:
        vmin, vmax = 0, int(data) - 1
    elif np.size(data) == 2:
        # NOTE: unreachable in practice -- the `np.size(data) > 1` branch above captures this case
        vmin, vmax = data
    else:
        raise ValueError("Invalid `data` to construct norm!")
    # Create normalization
    if log:
        if midpoint is None:
            norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
        else:
            norm = MidpointLogNormalize(vmin=vmin, vmax=vmax, midpoint=midpoint)
    else:
        if midpoint is None:
            norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        else:
            norm = MidpointNormalize(vmin=vmin, vmax=vmax, midpoint=midpoint)
return norm
# ==================================
# ==== INTERNAL FUNCTIONS ====
# ==================================
def _setAxis_scale(ax, axis, scale, thresh=None):
kw = {}
if scale.startswith('lin'):
scale = 'linear'
elif scale == 'symlog':
if thresh is None:
thresh = 1.0
kw['linthresh' + axis] = thresh
if axis == 'x':
ax.set_xscale(scale, **kw)
elif axis == 'y':
ax.set_yscale(scale, **kw)
else:
raise RuntimeError("Unrecognized ``axis`` = %s" % (axis))
return
def _setAxis_label(ax, axis, label, fs=None, **kwargs):
color = _color_from_kwargs(kwargs)
if axis == 'x':
ax.set_xlabel(label, size=fs, color=color)
elif axis == 'y':
ax.set_ylabel(label, size=fs, color=color)
else:
raise RuntimeError("Unrecognized ``axis`` = %s" % (axis))
return
def _clear_frame(ax=None):
# Taken from a post by Tony S Yu
if ax is None: ax = plt.gca()
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
for spine in ax.spines.values(): spine.set_visible(False)
return
def _scale_to_log_flag(scale):
# Check formatting of `scale` str
scale = _clean_scale(scale)
if scale.startswith('log'):
log = True
elif scale.startswith('lin'):
log = False
else:
raise ValueError("Unrecognized `scale` '{}'; must start with 'log' or 'lin'".format(scale))
return log
def _clean_scale(scale):
"""Cleanup a 'scaling' string to be matplotlib compatible.
"""
scale = scale.lower()
if scale.startswith('lin'):
scale = 'linear'
return scale
def _get_cmap(cmap):
"""Retrieve a colormap with the given name if it is not already a colormap.
"""
if isinstance(cmap, six.string_types):
return mpl.cm.get_cmap(cmap)
elif isinstance(cmap, mpl.colors.Colormap):
return cmap
else:
raise ValueError("`cmap` '{}' is not a valid colormap or colormap name".format(cmap))
def _color_from_kwargs(kwargs, pop=False):
msg = "Use `color` instead of `c` or `col` for color specification!"
if 'c' in kwargs:
if pop:
col = kwargs.pop('c')
else:
col = kwargs['c']
warnings.warn(msg, DeprecationWarning, stacklevel=3)
elif 'col' in kwargs:
if pop:
col = kwargs.pop('col')
else:
col = kwargs['col']
warnings.warn(msg, DeprecationWarning, stacklevel=3)
elif 'color' in kwargs:
if pop:
col = kwargs.pop('color')
else:
col = kwargs['color']
else:
col = None
return col
class MidpointNormalize(mpl.colors.Normalize):
"""
    Normalize the colorbar so that diverging colors work their way out to either side of a
    prescribed midpoint value,
    e.g. im = ax1.imshow(array, norm=MidpointNormalize(midpoint=0.0, vmin=-100, vmax=100))
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
super().__init__(vmin, vmax, clip)
self.midpoint = midpoint
return
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
class MidpointLogNormalize(mpl.colors.LogNorm):
"""
    Normalize the colorbar (with logarithmic scaling) so that diverging colors work their way
    out to either side of a prescribed midpoint value,
    e.g. im = ax1.imshow(array, norm=MidpointLogNormalize(midpoint=1.0, vmin=0.01, vmax=100.0))
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
super().__init__(vmin, vmax, clip)
self.midpoint = midpoint
return
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
vals = zmath.interp(value, x, y, xlog=True, ylog=False)
return np.ma.masked_array(vals, np.isnan(value))
# ======================
# ==== DEPRECATED ====
# ======================
def unifyAxesLimits(*args, **kwargs):
utils.dep_warn("unifyAxesLimits", newname="unify_axes_limits")
    return unify_axes_limits(*args, **kwargs)
def colormap(*args, **kwargs):
utils.dep_warn("colormap", newname="smap")
return smap(*args, **kwargs)
| lzkelley/zcode | zcode/plot/plot_core.py | Python | mit | 56,864 | ["Amber"] | 3f56e426e18c3a5e62139aae1facb006606ab65d7c82988eeac693e08d0b36f5 |
""" DIRAC FileCatalog component representing a directory tree with simple nodes
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryManager.DirectoryTreeBase import DirectoryTreeBase
class DirectoryNodeTree(DirectoryTreeBase):
"""Class managing Directory Tree as a self-linked structure with directory
names stored in each node
"""
def __init__(self, database=None):
DirectoryTreeBase.__init__(self, database)
self.treeTable = "FC_DirectoryTreeM"
def findDir(self, path):
"""Find the identifier of a directory specified by its path"""
dpath = path
if path[0] == "/":
dpath = path[1:]
elements = dpath.split("/")
req = " "
for level in range(len(elements), 0, -1):
if level > 1:
req += "SELECT DirID from FC_DirectoryTreeM WHERE Level=%d AND DirName='%s' AND Parent=(" % (
level,
elements[level - 1],
)
else:
req += "SELECT DirID from FC_DirectoryTreeM WHERE Level=%d AND DirName='%s'" % (
level,
elements[level - 1],
)
req += ")" * (len(elements) - 1)
# print req
result = self.db._query(req)
# print "in findDir",result
if not result["OK"]:
return result
if not result["Value"]:
return S_OK(0)
return S_OK(result["Value"][0][0])
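    # --- Illustrative note (added for clarity; not part of the original DIRAC code) ---
    # `findDir` resolves a path by nesting one sub-query per path element.  For example, a
    # hypothetical path '/vo/user/data' would produce (whitespace aside) the query:
    #   SELECT DirID from FC_DirectoryTreeM WHERE Level=3 AND DirName='data' AND Parent=(
    #     SELECT DirID from FC_DirectoryTreeM WHERE Level=2 AND DirName='user' AND Parent=(
    #       SELECT DirID from FC_DirectoryTreeM WHERE Level=1 AND DirName='vo'))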
def makeDir(self, path):
"""Create a single directory"""
result = self.findDir(path)
if not result["OK"]:
return result
dirID = result["Value"]
if dirID:
return S_OK(dirID)
dpath = path
if path[0] == "/":
dpath = path[1:]
elements = dpath.split("/")
level = len(elements)
dirName = elements[-1]
result = self.getParent(path)
if not result["OK"]:
return result
parentDirID = result["Value"]
names = ["DirName", "Level", "Parent"]
values = [dirName, level, parentDirID]
result = self.db.insertFields("FC_DirectoryTreeM", names, values)
if not result["OK"]:
return result
return S_OK(result["lastRowId"])
def existsDir(self, path):
"""Check the existence of a directory at the specified path"""
result = self.findDir(path)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK({"Exists": False})
else:
return S_OK({"Exists": True, "DirID": result["Value"]})
def getParent(self, path):
"""Get the parent ID of the given directory"""
dpath = path
if path[0] == "/":
dpath = path[1:]
elements = dpath.split("/")
if len(elements) > 1:
parentDir = os.path.dirname(path)
result = self.findDir(parentDir)
if not result["OK"]:
return result
parentDirID = result["Value"]
if not parentDirID:
return S_ERROR("No parent directory")
return S_OK(parentDirID)
else:
return S_OK(0)
def getParentID(self, dirID):
""" """
if dirID == 0:
return S_ERROR("Root directory ID given")
req = "SELECT Parent FROM FC_DirectoryTreeM WHERE DirID=%d" % dirID
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("No parent found")
return S_OK(result["Value"][0][0])
def getDirectoryName(self, dirID):
"""Get directory name by directory ID"""
req = "SELECT DirName FROM FC_DirectoryTreeM WHERE DirID=%d" % int(dirID)
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Directory with id %d not found" % int(dirID))
return S_OK(result["Value"][0][0])
def getDirectoryPath(self, dirID):
"""Get directory path by directory ID"""
dirPath = ""
dID = dirID
while True:
result = self.getDirectoryName(dID)
if not result["OK"]:
return result
dirPath = "/" + result["Value"] + dirPath
result = self.getParentID(dID)
if not result["OK"]:
return result
if result["Value"] == 0:
break
else:
dID = result["Value"]
        return S_OK(dirPath)
def getPathIDs(self, path):
"""Get IDs of all the directories in the parent hierarchy"""
result = self.findDir(path)
if not result["OK"]:
return result
dID = result["Value"]
parentIDs = []
while True:
            result = self.getParentID(dID)
if not result["OK"]:
return result
dID = result["Value"]
parentIDs.append(dID)
if dID == 0:
break
parentIDs.append(0)
parentIDs.reverse()
return S_OK(parentIDs)
def getChildren(self, path):
"""Get child directory IDs for the given directory"""
if isinstance(path, str):
result = self.findDir(path)
if not result["OK"]:
return result
dirID = result["Value"]
else:
dirID = path
req = "SELECT DirID FROM FC_DirectoryTreeM WHERE Parent=%d" % dirID
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK([])
return S_OK([x[0] for x in result["Value"]])
| ic-hep/DIRAC | src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryManager/DirectoryNodeTree.py | Python | gpl-3.0 | 5,996 | ["DIRAC"] | 233e097ca1cc16bbf78afe8f00431206430e6e26e704ebaa0cdaaef20c68b6bb |
""" FileManagerBase is a base class for all the specific File Managers
"""
# pylint: disable=protected-access
import six
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities.Pfn import pfnunparse
class FileManagerBase(object):
"""Base class for all the specific File Managers"""
def __init__(self, database=None):
self.db = database
self.statusDict = {}
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res["OK"]:
return res["Value"]
gLogger.warn("Failed to get MySQL connection", res["Message"])
return connection
def setDatabase(self, database):
self.db = database
def getFileCounters(self, connection=False):
"""Get a number of counters to verify the sanity of the Files in the catalog"""
connection = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) FROM FC_Files;"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Files"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_Replicas )"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Files w/o Replicas"] = res["Value"][0][0]
req = "SELECT COUNT(RepID) FROM FC_Replicas WHERE FileID NOT IN ( SELECT FileID FROM FC_Files )"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Replicas w/o Files"] = res["Value"][0][0]
treeTable = self.db.dtree.getTreeTable()
req = "SELECT COUNT(FileID) FROM FC_Files WHERE DirID NOT IN ( SELECT DirID FROM %s)" % treeTable
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Orphan Files"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_FileInfo)"
res = self.db._query(req, connection)
if not res["OK"]:
resultDict["Files w/o FileInfo"] = 0
else:
resultDict["Files w/o FileInfo"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_FileInfo WHERE FileID NOT IN ( SELECT FileID FROM FC_Files)"
res = self.db._query(req, connection)
if not res["OK"]:
resultDict["FileInfo w/o Files"] = 0
else:
resultDict["FileInfo w/o Files"] = res["Value"][0][0]
return S_OK(resultDict)
def getReplicaCounters(self, connection=False):
"""Get a number of counters to verify the sanity of the Replicas in the catalog"""
connection = self._getConnection(connection)
req = "SELECT COUNT(*) FROM FC_Replicas;"
res = self.db._query(req, connection)
if not res["OK"]:
return res
return S_OK({"Replicas": res["Value"][0][0]})
######################################################
#
# File write methods
#
def _insertFiles(self, lfns, uid, gid, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _deleteFiles(self, toPurge, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _insertReplicas(self, lfns, master=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _findFiles(self, lfns, metadata=["FileID"], allStatus=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getFileReplicas(self, fileIDs, fields_input=["PFN"], allStatus=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getFileIDFromGUID(self, guid, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def getLFNForGUID(self, guids, connection=False):
"""Returns the LFN matching a given GUID"""
return S_ERROR("To be implemented on derived class")
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _deleteReplicas(self, lfns, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _setReplicaStatus(self, fileID, se, status, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _setReplicaHost(self, fileID, se, newSE, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFiles(self, dirID, fileNames, metadata, allStatus=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFileIDs(self, dirID, requestString=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _findFileIDs(self, lfns, connection=False):
"""To be implemented on derived class
Should return following the successful/failed convention
Successful is a dictionary with keys the lfn, and values the FileID"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryReplicas(self, dirID, allStatus=False, connection=False):
"""To be implemented on derived class
Should return with only one value, being a list of all the replicas (FileName,FileID,SEID,PFN)
"""
return S_ERROR("To be implemented on derived class")
def countFilesInDir(self, dirId):
"""Count how many files there is in a given Directory
:param int dirID: directory id
:returns: S_OK(value) or S_ERROR
"""
return S_ERROR("To be implemented on derived class")
def _getFileLFNs(self, fileIDs):
"""Get the file LFNs for a given list of file IDs"""
stringIDs = intListToString(fileIDs)
treeTable = self.db.dtree.getTreeTable()
req = (
"SELECT F.FileID, CONCAT(D.DirName,'/',F.FileName) from FC_Files as F,\
%s as D WHERE F.FileID IN ( %s ) AND F.DirID=D.DirID"
% (treeTable, stringIDs)
)
result = self.db._query(req)
if not result["OK"]:
return result
fileNameDict = {}
for row in result["Value"]:
fileNameDict[row[0]] = row[1]
failed = {}
successful = fileNameDict
if len(fileNameDict) != len(fileIDs):
for id_ in fileIDs:
if id_ not in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({"Successful": successful, "Failed": failed})
def addFile(self, lfns, credDict, connection=False):
"""Add files to the catalog
:param dict lfns: dict{ lfn : info}. 'info' is a dict containing PFN, SE, Size and Checksum
the SE parameter can be a list if we have several replicas to register
"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ["PFN", "SE", "Size", "Checksum"])
if not res["OK"]:
failed[lfn] = res["Message"]
lfns.pop(lfn)
res = self._addFiles(lfns, credDict, connection=connection)
if not res["OK"]:
for lfn in lfns.keys():
failed[lfn] = res["Message"]
else:
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
return S_OK({"Successful": successful, "Failed": failed})
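    # --- Illustrative note (added for clarity; not part of the original DIRAC code) ---
    # `addFile` expects a dict keyed by LFN; the values carry the replica information.  The
    # names below are purely hypothetical examples:
    #   lfns = {"/vo/user/file.dat": {"PFN": "", "SE": "SOME-disk",
    #                                 "Size": 1048576, "Checksum": "ad32f9c1"}}
    #   result = fileManager.addFile(lfns, credDict)
    # 'SE' may also be a list of storage elements, in which case the first entry is registered
    # as the master replica and the rest as additional replicas (see `_addFiles`).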
def _addFiles(self, lfns, credDict, connection=False):
"""Main file adding method"""
connection = self._getConnection(connection)
successful = {}
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result["OK"]:
return result
uid, gid = result["Value"]
# prepare lfns with master replicas - the first in the list or a unique replica
masterLfns = {}
extraLfns = {}
for lfn in lfns:
masterLfns[lfn] = dict(lfns[lfn])
if isinstance(lfns[lfn].get("SE"), list):
masterLfns[lfn]["SE"] = lfns[lfn]["SE"][0]
if len(lfns[lfn]["SE"]) > 1:
extraLfns[lfn] = dict(lfns[lfn])
extraLfns[lfn]["SE"] = lfns[lfn]["SE"][1:]
# Check whether the supplied files have been registered already
res = self._getExistingMetadata(list(masterLfns), connection=connection)
if not res["OK"]:
return res
existingMetadata, failed = res["Value"]
if existingMetadata:
success, fail = self._checkExistingMetadata(existingMetadata, masterLfns)
successful.update(success)
failed.update(fail)
for lfn in list(success) + list(fail):
masterLfns.pop(lfn)
# If GUIDs are supposed to be unique check their pre-existance
if self.db.uniqueGUID:
fail = self._checkUniqueGUID(masterLfns, connection=connection)
failed.update(fail)
for lfn in fail:
masterLfns.pop(lfn)
# If we have files left to register
if masterLfns:
# Create the directories for the supplied files and store their IDs
directories = self._getFileDirectories(list(masterLfns))
for directory, fileNames in directories.items():
res = self.db.dtree.makeDirectories(directory, credDict)
if not res["OK"]:
for fileName in fileNames:
lfn = os.path.join(directory, fileName)
failed[lfn] = res["Message"]
masterLfns.pop(lfn)
continue
for fileName in fileNames:
if not fileName:
                        failed[directory] = "Is not a valid file"
masterLfns.pop(directory)
continue
lfn = "%s/%s" % (directory, fileName)
lfn = lfn.replace("//", "/")
# This condition should never be true, we would not be here otherwise...
if not res["OK"]:
failed[lfn] = "Failed to create directory for file"
masterLfns.pop(lfn)
else:
masterLfns[lfn]["DirID"] = res["Value"]
# If we still have files left to register
if masterLfns:
res = self._insertFiles(masterLfns, uid, gid, connection=connection)
if not res["OK"]:
for lfn in list(masterLfns): # pylint: disable=consider-iterating-dictionary
failed[lfn] = res["Message"]
masterLfns.pop(lfn)
else:
for lfn, error in res["Value"]["Failed"].items():
failed[lfn] = error
masterLfns.pop(lfn)
masterLfns = res["Value"]["Successful"]
# Add the ancestors
if masterLfns:
res = self._populateFileAncestors(masterLfns, connection=connection)
toPurge = []
if not res["OK"]:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering ancestors"
toPurge.append(masterLfns[lfn]["FileID"])
else:
failed.update(res["Value"]["Failed"])
for lfn, error in res["Value"]["Failed"].items():
toPurge.append(masterLfns[lfn]["FileID"])
if toPurge:
self._deleteFiles(toPurge, connection=connection)
# Register the replicas
newlyRegistered = {}
if masterLfns:
res = self._insertReplicas(masterLfns, master=True, connection=connection)
toPurge = []
if not res["OK"]:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering replica"
toPurge.append(masterLfns[lfn]["FileID"])
else:
newlyRegistered = res["Value"]["Successful"]
successful.update(newlyRegistered)
failed.update(res["Value"]["Failed"])
for lfn, error in res["Value"]["Failed"].items():
toPurge.append(masterLfns[lfn]["FileID"])
if toPurge:
self._deleteFiles(toPurge, connection=connection)
# Add extra replicas for successfully registered LFNs
for lfn in list(extraLfns):
if lfn not in successful:
extraLfns.pop(lfn)
if extraLfns:
res = self._findFiles(list(extraLfns), ["FileID", "DirID"], connection=connection)
if not res["OK"]:
for lfn in list(lfns):
failed[lfn] = "Failed while registering extra replicas"
successful.pop(lfn)
extraLfns.pop(lfn)
else:
failed.update(res["Value"]["Failed"])
for lfn in res["Value"]["Failed"]:
successful.pop(lfn)
extraLfns.pop(lfn)
for lfn, fileDict in res["Value"]["Successful"].items():
extraLfns[lfn]["FileID"] = fileDict["FileID"]
extraLfns[lfn]["DirID"] = fileDict["DirID"]
if extraLfns:
res = self._insertReplicas(extraLfns, master=False, connection=connection)
if not res["OK"]:
for lfn in extraLfns: # pylint: disable=consider-iterating-dictionary
failed[lfn] = "Failed while registering extra replicas"
successful.pop(lfn)
else:
newlyRegistered = res["Value"]["Successful"]
successful.update(newlyRegistered)
failed.update(res["Value"]["Failed"])
return S_OK({"Successful": successful, "Failed": failed})
def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
connection = self._getConnection(connection)
for directoryID in directorySEDict.keys():
result = self.db.dtree.getPathIDsByID(directoryID)
if not result["OK"]:
return result
parentIDs = result["Value"]
dirDict = directorySEDict[directoryID]
for seID in dirDict.keys():
seDict = dirDict[seID]
files = seDict["Files"]
size = seDict["Size"]
insertTuples = []
for dirID in parentIDs:
insertTuples.append("(%d,%d,%d,%d,UTC_TIMESTAMP())" % (dirID, seID, size, files))
req = "INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate) "
req += "VALUES %s" % ",".join(insertTuples)
req += (
" ON DUPLICATE KEY UPDATE SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() "
% (change, size, change, files)
)
res = self.db._update(req)
if not res["OK"]:
gLogger.warn("Failed to update FC_DirectoryUsage", res["Message"])
return S_OK()
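    # --- Illustrative note (added for clarity; not part of the original DIRAC code) ---
    # `_updateDirectoryUsage` issues one upsert per storage element, propagated to every parent
    # directory.  With hypothetical IDs (DirID=12, SEID=3), a change of '+' for 2 files
    # totalling 1048576 bytes would produce roughly:
    #   INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate)
    #     VALUES (12,3,1048576,2,UTC_TIMESTAMP())
    #     ON DUPLICATE KEY UPDATE SESize=SESize+1048576, SEFiles=SEFiles+2, LastUpdate=UTC_TIMESTAMP()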
def _populateFileAncestors(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, lfnDict in lfns.items():
originalFileID = lfnDict["FileID"]
originalDepth = lfnDict.get("AncestorDepth", 1)
ancestors = lfnDict.get("Ancestors", [])
if isinstance(ancestors, six.string_types):
ancestors = [ancestors]
if lfn in ancestors:
ancestors.remove(lfn)
if not ancestors:
successful[lfn] = True
continue
res = self._findFiles(ancestors, connection=connection)
if res["Value"]["Failed"]:
failed[lfn] = "Failed to resolve ancestor files"
continue
ancestorIDs = res["Value"]["Successful"]
fileIDLFNs = {}
toInsert = {}
for ancestor in ancestorIDs.keys():
fileIDLFNs[ancestorIDs[ancestor]["FileID"]] = ancestor
toInsert[ancestorIDs[ancestor]["FileID"]] = originalDepth
res = self._getFileAncestors(list(fileIDLFNs))
if not res["OK"]:
failed[lfn] = "Failed to obtain all ancestors"
continue
fileIDAncestorDict = res["Value"]
for fileIDDict in fileIDAncestorDict.values():
for ancestorID, relativeDepth in fileIDDict.items():
toInsert[ancestorID] = relativeDepth + originalDepth
res = self._insertFileAncestors(originalFileID, toInsert, connection=connection)
if not res["OK"]:
if "Duplicate" in res["Message"]:
failed[lfn] = "Failed to insert ancestor files: duplicate entry"
else:
failed[lfn] = "Failed to insert ancestor files"
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def _insertFileAncestors(self, fileID, ancestorDict, connection=False):
connection = self._getConnection(connection)
ancestorTuples = []
for ancestorID, depth in ancestorDict.items():
ancestorTuples.append("(%d,%d,%d)" % (fileID, ancestorID, depth))
if not ancestorTuples:
return S_OK()
req = "INSERT INTO FC_FileAncestors (FileID, AncestorID, AncestorDepth) VALUES %s" % intListToString(
ancestorTuples
)
return self.db._update(req, connection)
def _getFileAncestors(self, fileIDs, depths=[], connection=False):
connection = self._getConnection(connection)
req = "SELECT FileID, AncestorID, AncestorDepth FROM FC_FileAncestors WHERE FileID IN (%s)" % intListToString(
fileIDs
)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res["OK"]:
return res
fileIDAncestors = {}
for fileID, ancestorID, depth in res["Value"]:
if fileID not in fileIDAncestors:
fileIDAncestors[fileID] = {}
fileIDAncestors[fileID][ancestorID] = depth
return S_OK(fileIDAncestors)
def _getFileDescendents(self, fileIDs, depths, connection=False):
connection = self._getConnection(connection)
req = (
"SELECT AncestorID, FileID, AncestorDepth FROM FC_FileAncestors WHERE AncestorID IN (%s)"
% intListToString(fileIDs)
)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res["OK"]:
return res
fileIDAncestors = {}
for ancestorID, fileID, depth in res["Value"]:
if ancestorID not in fileIDAncestors:
fileIDAncestors[ancestorID] = {}
fileIDAncestors[ancestorID][fileID] = depth
return S_OK(fileIDAncestors)
def addFileAncestors(self, lfns, connection=False):
"""Add file ancestors to the catalog"""
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(list(lfns), connection=connection)
if not result["OK"]:
return result
if result["Value"]["Failed"]:
failed.update(result["Value"]["Failed"])
for lfn in result["Value"]["Failed"]:
lfns.pop(lfn)
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
for lfn in result["Value"]["Successful"]:
lfns[lfn]["FileID"] = result["Value"]["Successful"][lfn]["FileID"]
result = self._populateFileAncestors(lfns, connection)
if not result["OK"]:
return result
failed.update(result["Value"]["Failed"])
successful = result["Value"]["Successful"]
return S_OK({"Successful": successful, "Failed": failed})
def _getFileRelatives(self, lfns, depths, relation, connection=False):
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(list(lfns), connection=connection)
if not result["OK"]:
return result
if result["Value"]["Failed"]:
failed.update(result["Value"]["Failed"])
for lfn in result["Value"]["Failed"]:
lfns.pop(lfn)
if not lfns:
return S_OK({"Successful": successful, "Failed": failed})
inputIDDict = {}
for lfn in result["Value"]["Successful"]:
inputIDDict[result["Value"]["Successful"][lfn]["FileID"]] = lfn
inputIDs = list(inputIDDict)
if relation == "ancestor":
result = self._getFileAncestors(inputIDs, depths, connection)
else:
result = self._getFileDescendents(inputIDs, depths, connection)
if not result["OK"]:
return result
failed = {}
successful = {}
relDict = result["Value"]
for id_ in inputIDs:
if id_ in relDict:
result = self._getFileLFNs(list(relDict[id_]))
if not result["OK"]:
                    failed[inputIDDict[id_]] = "Failed to find %s" % relation
else:
if result["Value"]["Successful"]:
resDict = {}
for aID in result["Value"]["Successful"]:
resDict[result["Value"]["Successful"][aID]] = relDict[id_][aID]
successful[inputIDDict[id_]] = resDict
for aID in result["Value"]["Failed"]:
failed[inputIDDict[id_]] = "Failed to get the ancestor LFN"
else:
successful[inputIDDict[id_]] = {}
return S_OK({"Successful": successful, "Failed": failed})
def getFileAncestors(self, lfns, depths, connection=False):
return self._getFileRelatives(lfns, depths, "ancestor", connection)
def getFileDescendents(self, lfns, depths, connection=False):
return self._getFileRelatives(lfns, depths, "descendent", connection)
def _getExistingMetadata(self, lfns, connection=False):
connection = self._getConnection(connection)
# Check whether the files already exist before adding
res = self._findFiles(lfns, ["FileID", "Size", "Checksum", "GUID"], connection=connection)
if not res["OK"]:
return res
successful = res["Value"]["Successful"]
failed = res["Value"]["Failed"]
for lfn, error in list(failed.items()):
if error == "No such file or directory":
failed.pop(lfn)
return S_OK((successful, failed))
def _checkExistingMetadata(self, existingLfns, lfns):
failed = {}
successful = {}
fileIDLFNs = {}
for lfn, fileDict in existingLfns.items():
fileIDLFNs[fileDict["FileID"]] = lfn
# For those that exist get the replicas to determine whether they are already registered
res = self._getFileReplicas(list(fileIDLFNs))
if not res["OK"]:
for lfn in fileIDLFNs.values():
failed[lfn] = "Failed checking pre-existing replicas"
else:
replicaDict = res["Value"]
for fileID, lfn in fileIDLFNs.items():
fileMetadata = existingLfns[lfn]
existingGuid = fileMetadata["GUID"]
existingSize = fileMetadata["Size"]
existingChecksum = fileMetadata["Checksum"]
newGuid = lfns[lfn]["GUID"]
newSize = lfns[lfn]["Size"]
newChecksum = lfns[lfn]["Checksum"]
# Ensure that the key file metadata is the same
if (existingGuid != newGuid) or (existingSize != newSize) or (existingChecksum != newChecksum):
failed[lfn] = "File already registered with alternative metadata"
# If the DB does not have replicas for this file return an error
elif fileID not in replicaDict or not replicaDict[fileID]:
failed[lfn] = "File already registered with no replicas"
# If the supplied SE is not in the existing replicas return an error
elif not lfns[lfn]["SE"] in replicaDict[fileID].keys():
failed[lfn] = "File already registered with alternative replicas"
# If we get here the file being registered already exists exactly in the DB
else:
successful[lfn] = True
return successful, failed
def _checkUniqueGUID(self, lfns, connection=False):
connection = self._getConnection(connection)
guidLFNs = {}
failed = {}
for lfn, fileDict in lfns.items():
guidLFNs[fileDict["GUID"]] = lfn
res = self._getFileIDFromGUID(list(guidLFNs), connection=connection)
if not res["OK"]:
return dict.fromkeys(lfns, res["Message"])
for guid, fileID in res["Value"].items():
# resolve this to LFN
failed[guidLFNs[guid]] = "GUID already registered for another file %s" % fileID
return failed
    def removeFile(self, lfns, connection=False):
        """Remove file from the catalog"""
        connection = self._getConnection(connection)
successful = {}
failed = {}
res = self._findFiles(lfns, ["DirID", "FileID", "Size"], connection=connection)
if not res["OK"]:
return res
for lfn, error in res["Value"]["Failed"].items():
if error == "No such file or directory":
successful[lfn] = True
else:
failed[lfn] = error
fileIDLfns = {}
lfns = res["Value"]["Successful"]
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict["FileID"]] = lfn
res = self._computeStorageUsageOnRemoveFile(lfns, connection=connection)
if not res["OK"]:
return res
directorySESizeDict = res["Value"]
# Now do removal
res = self._deleteFiles(list(fileIDLfns), connection=connection)
if not res["OK"]:
for lfn in fileIDLfns.values():
failed[lfn] = res["Message"]
else:
# Update the directory usage
self._updateDirectoryUsage(directorySESizeDict, "-", connection=connection)
for lfn in fileIDLfns.values():
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def _computeStorageUsageOnRemoveFile(self, lfns, connection=False):
# Resolve the replicas to calculate reduction in storage usage
fileIDLfns = {}
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict["FileID"]] = lfn
res = self._getFileReplicas(list(fileIDLfns), connection=connection)
if not res["OK"]:
return res
directorySESizeDict = {}
for fileID, seDict in res["Value"].items():
dirID = lfns[fileIDLfns[fileID]]["DirID"]
size = lfns[fileIDLfns[fileID]]["Size"]
directorySESizeDict.setdefault(dirID, {})
directorySESizeDict[dirID].setdefault(0, {"Files": 0, "Size": 0})
directorySESizeDict[dirID][0]["Size"] += size
directorySESizeDict[dirID][0]["Files"] += 1
for seName in seDict.keys():
res = self.db.seManager.findSE(seName)
if not res["OK"]:
return res
seID = res["Value"]
size = lfns[fileIDLfns[fileID]]["Size"]
directorySESizeDict[dirID].setdefault(seID, {"Files": 0, "Size": 0})
directorySESizeDict[dirID][seID]["Size"] += size
directorySESizeDict[dirID][seID]["Files"] += 1
return S_OK(directorySESizeDict)
def setFileStatus(self, lfns, connection=False):
"""Get set the group for the supplied files"""
connection = self._getConnection(connection)
res = self._findFiles(lfns, ["FileID", "UID"], connection=connection)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
status = lfns[lfn]
if isinstance(status, six.string_types):
if status not in self.db.validFileStatus:
failed[lfn] = "Invalid file status %s" % status
continue
result = self._getStatusInt(status, connection=connection)
if not result["OK"]:
failed[lfn] = res["Message"]
continue
status = result["Value"]
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "Status", status, connection=connection)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# Replica write methods
#
def addReplica(self, lfns, connection=False):
"""Add replica to the catalog"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ["PFN", "SE"])
if not res["OK"]:
failed[lfn] = res["Message"]
lfns.pop(lfn)
res = self._addReplicas(lfns, connection=connection)
if not res["OK"]:
for lfn in lfns:
failed[lfn] = res["Message"]
else:
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
return S_OK({"Successful": successful, "Failed": failed})
def _addReplicas(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
res = self._findFiles(list(lfns), ["DirID", "FileID", "Size"], connection=connection)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
for lfn in failed:
lfns.pop(lfn)
lfnFileIDDict = res["Value"]["Successful"]
for lfn, fileDict in lfnFileIDDict.items():
lfns[lfn].update(fileDict)
res = self._insertReplicas(lfns, connection=connection)
if not res["OK"]:
for lfn in lfns:
failed[lfn] = res["Message"]
else:
successful = res["Value"]["Successful"]
failed.update(res["Value"]["Failed"])
return S_OK({"Successful": successful, "Failed": failed})
def removeReplica(self, lfns, connection=False):
"""Remove replica from catalog"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ["SE"])
if not res["OK"]:
failed[lfn] = res["Message"]
lfns.pop(lfn)
res = self._deleteReplicas(lfns, connection=connection)
if not res["OK"]:
for lfn in lfns.keys():
failed[lfn] = res["Message"]
else:
failed.update(res["Value"]["Failed"])
successful.update(res["Value"]["Successful"])
return S_OK({"Successful": successful, "Failed": failed})
def setReplicaStatus(self, lfns, connection=False):
"""Set replica status in the catalog"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ["SE", "Status"])
if not res["OK"]:
failed[lfn] = res["Message"]
continue
status = info["Status"]
se = info["SE"]
res = self._findFiles([lfn], ["FileID"], connection=connection)
if lfn not in res["Value"]["Successful"]:
failed[lfn] = res["Value"]["Failed"][lfn]
continue
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setReplicaStatus(fileID, se, status, connection=connection)
if res["OK"]:
successful[lfn] = res["Value"]
else:
failed[lfn] = res["Message"]
return S_OK({"Successful": successful, "Failed": failed})
def setReplicaHost(self, lfns, connection=False):
"""Set replica host in the catalog"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ["SE", "NewSE"])
if not res["OK"]:
failed[lfn] = res["Message"]
continue
newSE = info["NewSE"]
se = info["SE"]
res = self._findFiles([lfn], ["FileID"], connection=connection)
if lfn not in res["Value"]["Successful"]:
failed[lfn] = res["Value"]["Failed"][lfn]
continue
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setReplicaHost(fileID, se, newSE, connection=connection)
if res["OK"]:
successful[lfn] = res["Value"]
else:
failed[lfn] = res["Message"]
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# File read methods
#
def exists(self, lfns, connection=False):
"""Determine whether a file exists in the catalog"""
connection = self._getConnection(connection)
res = self._findFiles(lfns, allStatus=True, connection=connection)
if not res["OK"]:
return res
successful = res["Value"]["Successful"]
origFailed = res["Value"]["Failed"]
for lfn in successful:
successful[lfn] = lfn
failed = {}
if self.db.uniqueGUID:
guidList = []
val = None
# Try to identify if the GUID is given
# We consider only 2 options :
# either {lfn : guid}
# or { lfn : {PFN : .., GUID : ..} }
if isinstance(lfns, dict):
val = list(lfns.values())
# We have values, take the first to identify the type
if val:
val = val[0]
if isinstance(val, dict) and "GUID" in val:
# We are in the case {lfn : {PFN:.., GUID:..}}
guidList = [lfns[lfn]["GUID"] for lfn in lfns]
elif isinstance(val, six.string_types):
# We hope that it is the GUID which is given
guidList = list(lfns.values())
if guidList:
# A dict { guid: lfn to which it is supposed to be associated }
guidToGivenLfn = dict(zip(guidList, lfns))
res = self.getLFNForGUID(guidList, connection)
if not res["OK"]:
return res
guidLfns = res["Value"]["Successful"]
for guid, realLfn in guidLfns.items():
successful[guidToGivenLfn[guid]] = realLfn
for lfn, error in origFailed.items():
# It could be in successful because the guid exists with another lfn
if lfn in successful:
continue
if error == "No such file or directory":
successful[lfn] = False
else:
failed[lfn] = error
return S_OK({"Successful": successful, "Failed": failed})
def isFile(self, lfns, connection=False):
"""Determine whether a path is a file in the catalog"""
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self.exists(lfns, connection=connection)
def getFileSize(self, lfns, connection=False):
"""Get file size from the catalog"""
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
res = self._findFiles(lfns, ["Size"], connection=connection)
if not res["OK"]:
return res
totalSize = 0
for lfn in res["Value"]["Successful"]:
size = res["Value"]["Successful"][lfn]["Size"]
res["Value"]["Successful"][lfn] = size
totalSize += size
res["TotalSize"] = totalSize
return res
def getFileMetadata(self, lfns, connection=False):
"""Get file metadata from the catalog"""
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self._findFiles(
lfns,
[
"Size",
"Checksum",
"ChecksumType",
"UID",
"GID",
"GUID",
"CreationDate",
"ModificationDate",
"Mode",
"Status",
],
connection=connection,
)
def getPathPermissions(self, paths, credDict, connection=False):
"""Get the permissions for the supplied paths"""
connection = self._getConnection(connection)
res = self.db.ugManager.getUserAndGroupID(credDict)
if not res["OK"]:
return res
uid, gid = res["Value"]
res = self._findFiles(paths, metadata=["Mode", "UID", "GID"], connection=connection)
if not res["OK"]:
return res
successful = {}
for dirName, dirDict in res["Value"]["Successful"].items():
mode = dirDict["Mode"]
p_uid = dirDict["UID"]
p_gid = dirDict["GID"]
successful[dirName] = {}
if p_uid == uid:
successful[dirName]["Read"] = mode & stat.S_IRUSR
successful[dirName]["Write"] = mode & stat.S_IWUSR
successful[dirName]["Execute"] = mode & stat.S_IXUSR
elif p_gid == gid:
successful[dirName]["Read"] = mode & stat.S_IRGRP
successful[dirName]["Write"] = mode & stat.S_IWGRP
successful[dirName]["Execute"] = mode & stat.S_IXGRP
else:
successful[dirName]["Read"] = mode & stat.S_IROTH
successful[dirName]["Write"] = mode & stat.S_IWOTH
successful[dirName]["Execute"] = mode & stat.S_IXOTH
return S_OK({"Successful": successful, "Failed": res["Value"]["Failed"]})
######################################################
#
# Replica read methods
#
def __getReplicasForIDs(self, fileIDLfnDict, allStatus, connection=False):
"""Get replicas for files with already resolved IDs"""
replicas = {}
if fileIDLfnDict:
fields = []
if not self.db.lfnPfnConvention or self.db.lfnPfnConvention == "Weak":
fields = ["PFN"]
res = self._getFileReplicas(
list(fileIDLfnDict), fields_input=fields, allStatus=allStatus, connection=connection
)
if not res["OK"]:
return res
for fileID, seDict in res["Value"].items():
lfn = fileIDLfnDict[fileID]
replicas[lfn] = {}
for se, repDict in seDict.items():
pfn = repDict.get("PFN", "")
replicas[lfn][se] = pfn
result = S_OK(replicas)
return result
def getReplicas(self, lfns, allStatus, connection=False):
"""Get file replicas from the catalog"""
connection = self._getConnection(connection)
# Get FileID <-> LFN correspondence first
res = self._findFileIDs(lfns, connection=connection)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
fileIDLFNs = {}
for lfn, fileID in res["Value"]["Successful"].items():
fileIDLFNs[fileID] = lfn
result = self.__getReplicasForIDs(fileIDLFNs, allStatus, connection)
if not result["OK"]:
return result
replicas = result["Value"]
return S_OK({"Successful": replicas, "Failed": failed})
def getReplicasByMetadata(self, metaDict, path, allStatus, credDict, connection=False):
"""Get file replicas for files corresponding to the given metadata"""
connection = self._getConnection(connection)
# Get FileID <-> LFN correspondence first
failed = {}
result = self.db.fmeta.findFilesByMetadata(metaDict, path, credDict)
if not result["OK"]:
return result
idLfnDict = result["Value"]
result = self.__getReplicasForIDs(idLfnDict, allStatus, connection)
if not result["OK"]:
return result
replicas = result["Value"]
return S_OK({"Successful": replicas, "Failed": failed})
def getReplicaStatus(self, lfns, connection=False):
"""Get replica status from the catalog"""
connection = self._getConnection(connection)
res = self._findFiles(lfns, connection=connection)
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
fileIDLFNs = {}
for lfn, fileDict in res["Value"]["Successful"].items():
fileID = fileDict["FileID"]
fileIDLFNs[fileID] = lfn
successful = {}
if fileIDLFNs:
res = self._getFileReplicas(list(fileIDLFNs), allStatus=True, connection=connection)
if not res["OK"]:
return res
for fileID, seDict in res["Value"].items():
lfn = fileIDLFNs[fileID]
requestedSE = lfns[lfn]
if not requestedSE:
failed[lfn] = "Replica info not supplied"
elif requestedSE not in seDict:
failed[lfn] = "No replica at supplied site"
else:
successful[lfn] = seDict[requestedSE]["Status"]
return S_OK({"Successful": successful, "Failed": failed})
######################################################
#
# General usage methods
#
def _getStatusInt(self, status, connection=False):
connection = self._getConnection(connection)
req = "SELECT StatusID FROM FC_Statuses WHERE Status = '%s';" % status
res = self.db._query(req, connection)
if not res["OK"]:
return res
if res["Value"]:
return S_OK(res["Value"][0][0])
req = "INSERT INTO FC_Statuses (Status) VALUES ('%s');" % status
res = self.db._update(req, connection)
if not res["OK"]:
return res
return S_OK(res["lastRowId"])
def _getIntStatus(self, statusID, connection=False):
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
connection = self._getConnection(connection)
req = "SELECT StatusID,Status FROM FC_Statuses"
res = self.db._query(req, connection)
if not res["OK"]:
return res
if res["Value"]:
for row in res["Value"]:
self.statusDict[int(row[0])] = row[1]
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
return S_OK("Unknown")
def getFileIDsInDirectory(self, dirID, requestString=False):
"""Get a list of IDs for all the files stored in given directories or their
subdirectories
:param dirID: single directory ID or a list of directory IDs
:type dirID: int or python:list[int]
:param bool requestString: if True return result as a SQL SELECT string
:return: list of file IDs or SELECT string
"""
return self._getDirectoryFileIDs(dirID, requestString=requestString)
def getFilesInDirectory(self, dirID, verbose=False, connection=False):
connection = self._getConnection(connection)
files = {}
res = self._getDirectoryFiles(
dirID,
[],
[
"FileID",
"Size",
"GUID",
"Checksum",
"ChecksumType",
"Type",
"UID",
"GID",
"CreationDate",
"ModificationDate",
"Mode",
"Status",
],
connection=connection,
)
if not res["OK"]:
return res
if not res["Value"]:
return S_OK(files)
fileIDNames = {}
for fileName, fileDict in res["Value"].items():
try:
files[fileName] = {}
files[fileName]["MetaData"] = fileDict
fileIDNames[fileDict["FileID"]] = fileName
except KeyError:
# If we return S_ERROR here, it gets treated as an empty directory in most cases
# and the user isn't actually warned
raise Exception(
"File entry for '%s' is corrupt (DirID %s), please contact the catalog administrator"
% (fileName, dirID)
)
if verbose:
result = self._getFileReplicas(list(fileIDNames), connection=connection)
if not result["OK"]:
return result
for fileID, seDict in result["Value"].items():
fileName = fileIDNames[fileID]
files[fileName]["Replicas"] = seDict
return S_OK(files)
def getDirectoryReplicas(self, dirID, path, allStatus=False, connection=False):
"""Get the replicas for all the Files in the given Directory
:param int dirID: ID of the directory
:param unused path: useless
:param bool allStatus: whether all replicas and file status are considered
If False, take the visibleFileStatus and visibleReplicaStatus values from the configuration
"""
connection = self._getConnection(connection)
result = self._getDirectoryReplicas(dirID, allStatus, connection)
if not result["OK"]:
return result
resultDict = {}
seDict = {}
for fileName, fileID, seID, pfn in result["Value"]:
resultDict.setdefault(fileName, {})
if seID not in seDict:
res = self.db.seManager.getSEName(seID)
if not res["OK"]:
seDict[seID] = "Unknown"
else:
seDict[seID] = res["Value"]
se = seDict[seID]
resultDict[fileName][se] = pfn
return S_OK(resultDict)
def _getFileDirectories(self, lfns):
"""For a list of lfn, returns a dictionary with key the directory, and value
the files in that directory. It does not make any query, just splits the names
:param lfns: list of lfns
:type lfns: python:list
"""
dirDict = {}
for lfn in lfns:
lfnDir = os.path.dirname(lfn)
lfnFile = os.path.basename(lfn)
dirDict.setdefault(lfnDir, [])
dirDict[lfnDir].append(lfnFile)
return dirDict
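# Illustrative example (not part of the original source): for
#   ["/a/b/f1.txt", "/a/b/f2.txt", "/a/c/f3.txt"]
# _getFileDirectories returns
#   {"/a/b": ["f1.txt", "f2.txt"], "/a/c": ["f3.txt"]}
# i.e. it is a purely string-based split with no catalog query.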
def _checkInfo(self, info, requiredKeys):
if not info:
return S_ERROR("Missing parameters")
for key in requiredKeys:
if key not in info:
return S_ERROR("Missing '%s' parameter" % key)
return S_OK()
# def _checkLFNPFNConvention( self, lfn, pfn, se ):
# """ Check that the PFN corresponds to the LFN-PFN convention """
# if pfn == lfn:
# return S_OK()
# if ( len( pfn ) < len( lfn ) ) or ( pfn[-len( lfn ):] != lfn ) :
# return S_ERROR( 'PFN does not correspond to the LFN convention' )
# return S_OK()
def changeFileGroup(self, lfns):
"""Get set the group for the supplied files
:param lfns: dictionary < lfn : group >
:param int/str newGroup: optional new group/groupID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ["FileID", "GID"])
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
group = lfns[lfn]
if isinstance(group, six.string_types):
groupRes = self.db.ugManager.findGroup(group)
if not groupRes["OK"]:
return groupRes
group = groupRes["Value"]
currentGroup = res["Value"]["Successful"][lfn]["GID"]
if int(group) == int(currentGroup):
successful[lfn] = True
else:
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "GID", group)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def changeFileOwner(self, lfns):
"""Set the owner for the supplied files
:param lfns: dictionary < lfn : owner >
:param int/str newOwner: optional new user/userID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ["FileID", "UID"])
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
owner = lfns[lfn]
if isinstance(owner, six.string_types):
userRes = self.db.ugManager.findUser(owner)
if not userRes["OK"]:
return userRes
owner = userRes["Value"]
currentOwner = res["Value"]["Successful"][lfn]["UID"]
if int(owner) == int(currentOwner):
successful[lfn] = True
else:
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "UID", owner)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def changeFileMode(self, lfns):
""" " Set the mode for the supplied files
:param lfns: dictionary < lfn : mode >
:param int newMode: optional new mode the same for all the supplied lfns
"""
res = self._findFiles(lfns, ["FileID", "Mode"])
if not res["OK"]:
return res
failed = res["Value"]["Failed"]
successful = {}
for lfn in res["Value"]["Successful"]:
mode = lfns[lfn]
currentMode = res["Value"]["Successful"][lfn]["Mode"]
if int(currentMode) == int(mode):
successful[lfn] = True
else:
fileID = res["Value"]["Successful"][lfn]["FileID"]
res = self._setFileParameter(fileID, "Mode", mode)
if not res["OK"]:
failed[lfn] = res["Message"]
else:
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def setFileOwner(self, path, owner):
"""Set the file owner
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param owner: new user as a string or int uid
:type owner: str or int
"""
result = self.db.ugManager.findUser(owner)
if not result["OK"]:
return result
uid = result["Value"]
return self._setFileParameter(path, "UID", uid)
def setFileGroup(self, path, gname):
"""Set the file group
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param gname: new group as a string or int gid
:type gname: str or int
"""
result = self.db.ugManager.findGroup(gname)
if not result["OK"]:
return result
gid = result["Value"]
return self._setFileParameter(path, "GID", gid)
def setFileMode(self, path, mode):
"""Set the file mode
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param int mode: new mode
"""
return self._setFileParameter(path, "Mode", mode)
def getSEDump(self, seName):
"""
Return all the files at a given SE, together with checksum and size
:param seName: name of the StorageElement
:returns: S_OK with list of tuples (lfn, checksum, size)
"""
return S_ERROR("To be implemented on derived class")
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/FileManager/FileManagerBase.py
|
Python
|
gpl-3.0
| 54,505
|
[
"DIRAC"
] |
c11982273e13d35cc6b3f4f10e23b936b1f3f14cf1cb748fd0336d6a5e19877e
|
#### PATTERN | NL | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for Dutch word inflection:
# - pluralization and singularization of nouns,
# - conjugation of verbs,
# - predicative and attributive of adjectives.
# Accuracy (measured on CELEX Dutch morphology word forms):
# 79% for pluralize()
# 91% for singularize()
# 90% for Verbs.find_lemma()
# 88% for Verbs.find_lexeme()
# 99% for predicative()
# 99% for attributive()
import os
import sys
import re
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
PROGRESSIVE,
PARTICIPLE
)
sys.path.pop(0)
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"
VOWELS = ("a", "e", "i", "o", "u")
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)
is_vowel = lambda ch: ch in VOWELS
#### PLURALIZE ######################################################################################
plural_irregular_en = set(("dag", "dak", "dal", "pad", "vat", "weg"))
plural_irregular_een = set(("fee", "genie", "idee", "orgie", "ree"))
plural_irregular_eren = set(("blad", "ei", "gelid", "gemoed", "kalf", "kind", "lied", "rad", "rund"))
plural_irregular_deren = set(("hoen", "been"))
plural_irregular = {
"centrum": "centra",
"escargot": "escargots",
"gedrag": "gedragingen",
"gelid": "gelederen",
"kaars": "kaarsen",
"kleed": "kleren",
"koe": "koeien",
"lam": "lammeren",
"museum": "museums",
"stad": "steden",
"stoel": "stoelen",
"vlo": "vlooien"
}
def pluralize(word, pos=NOUN, custom={}):
""" Returns the plural of a given word.
For example: stad => steden.
The custom dictionary is for user-defined replacements.
"""
if word in custom.keys():
return custom[word]
w = word.lower()
if pos == NOUN:
if w in plural_irregular_en: # dag => dagen
return w + "en"
if w in plural_irregular_een: # fee => feeën
return w + u"ën"
if w in plural_irregular_eren: # blad => bladeren
return w + "eren"
if w in plural_irregular_deren: # been => beenderen
return w + "deren"
if w in plural_irregular:
return plural_irregular[w]
# Words ending in -icus get -ici: academicus => academici
if w.endswith("icus"):
return w[:-2] + "i"
# Words ending in -s usually get -sen: les => lessen.
if w.endswith(("es", "as", "nis", "ris", "vis")):
return w + "sen"
# Words ending in -s usually get -zen: huis => huizen.
if w.endswith("s") and not w.endswith(("us", "ts", "mens")):
return w[:-1] + "zen"
# Words ending in -f usually get -ven: brief => brieven.
if w.endswith("f"):
return w[:-1] + "ven"
# Words ending in -um get -ums: museum => museums.
if w.endswith("um"):
return w + "s"
# Words ending in unstressed -ee or -ie get -ën: bacterie => bacteriën
if w.endswith("ie"):
return w + "s"
if w.endswith(("ee","ie")):
return w[:-1] + u"ën"
# Words ending in -heid get -heden: mogelijkheid => mogelijkheden
if w.endswith("heid"):
return w[:-4] + "heden"
# Words ending in -e -el -em -en -er -ie get -s: broer => broers.
if w.endswith((u"é", "e", "el", "em", "en", "er", "eu", "ie", "ue", "ui", "eau", "ah")):
return w + "s"
# Words ending in a vowel get 's: auto => auto's.
if w.endswith(VOWELS) or w.endswith("y") and not w.endswith("e"):
return w + "'s"
# Words ending in -or always get -en: motor => motoren.
if w.endswith("or"):
return w + "en"
# Words ending in -ij get -en: boerderij => boerderijen.
if w.endswith("ij"):
return w + "en"
# Words ending in two consonants get -en: hand => handen.
if len(w) > 1 and not is_vowel(w[-1]) and not is_vowel(w[-2]):
return w + "en"
# Words ending in one consonant with a short sound: fles => flessen.
if len(w) > 2 and not is_vowel(w[-1]) and not is_vowel(w[-3]):
return w + w[-1] + "en"
# Words ending in one consonant with a long sound: raam => ramen.
if len(w) > 2 and not is_vowel(w[-1]) and w[-2] == w[-3]:
return w[:-2] + w[-1] + "en"
return w + "en"
return w
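# Illustrative examples (not part of the original module), following the rules
# and the irregular tables above:
#   pluralize("stad") == "steden" # irregular table
#   pluralize("dag") == "dagen" # plural_irregular_en
#   pluralize("huis") == "huizen" # -s => -zen
#   pluralize("mogelijkheid") == "mogelijkheden" # -heid => -heden
#   pluralize("auto") == "auto's" # vowel ending gets 's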
#### SINGULARIZE ###################################################################################
singular_irregular = dict((v,k) for k,v in plural_irregular.iteritems())
def singularize(word, pos=NOUN, custom={}):
if word in custom.keys():
return custom[word]
w = word.lower()
if pos == NOUN and w in singular_irregular:
return singular_irregular[w]
if pos == NOUN and w.endswith((u"ën", "en", "s", "i")):
# auto's => auto
if w.endswith("'s"):
return w[:-2]
# broers => broer
if w.endswith("s"):
return w[:-1]
# academici => academicus
if w.endswith("ici"):
return w[:-1] + "us"
# feeën => fee
if w.endswith(u"ën") and w[:-2] in plural_irregular_een:
return w[:-2]
# bacteriën => bacterie
if w.endswith(u"ën"):
return w[:-2] + "e"
# mogelijkheden => mogelijkheid
if w.endswith("heden"):
return w[:-5] + "heid"
# artikelen => artikel
if w.endswith("elen") and not w.endswith("delen"):
return w[:-2]
# chinezen => chinees
if w.endswith("ezen"):
return w[:-4] + "ees"
# neven => neef
if w.endswith("even") and len(w) > 4 and not is_vowel(w[-5]):
return w[:-4] + "eef"
if w.endswith("en"):
w = w[:-2]
# ogen => oog
if w in ("og","om","ur"):
return w[:-1] + w[-2] + w[-1]
# hoenderen => hoen
if w.endswith("der") and w[:-3] in plural_irregular_deren:
return w[:-3]
# eieren => ei
if w.endswith("er") and w[:-2] in plural_irregular_eren:
return w[:-2]
# dagen => dag (not daag)
if w in plural_irregular_en:
return w
# huizen => huis
if w.endswith("z"):
return w[:-1] + "s"
# brieven => brief
if w.endswith("v"):
return w[:-1] + "f"
# motoren => motor
if w.endswith("or"):
return w
# flessen => fles
if len(w) > 1 and not is_vowel(w[-1]) and w[-1] == w[-2]:
return w[:-1]
# baarden => baard
if len(w) > 1 and not is_vowel(w[-1]) and not is_vowel(w[-2]):
return w
# boerderijen => boerderij
if w.endswith("ij"):
return w
# idealen => ideaal
if w.endswith(("eal", "ean", "eol", "ial", "ian", "iat", "iol")):
return w[:-1] + w[-2] + w[-1]
# ramen => raam
if len(w) > 2 and not is_vowel(w[-1]) and is_vowel(w[-2]) and not is_vowel(w[-3]):
return w[:-1] + w[-2] + w[-1]
return w
return w
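# Illustrative examples (not part of the original module), mirroring the
# pluralization examples above:
#   singularize("steden") == "stad" # irregular table
#   singularize("auto's") == "auto" # strip 's
#   singularize("huizen") == "huis" # -zen => -s
#   singularize("mogelijkheden") == "mogelijkheid" # -heden => -heid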
#### VERB CONJUGATION ##############################################################################
class Verbs(_Verbs):
def __init__(self):
_Verbs.__init__(self, os.path.join(MODULE, "nl-verbs.txt"),
language = "nl",
format = [0, 1, 2, 3, 7, 8, 17, 18, 19, 23, 25, 24, 16, 9, 10, 11, 15, 33, 26, 27, 28, 32],
default = {
1: 0, 2: 0, 3: 0, 7: 0, # present singular
4: 7, 5: 7, 6: 7, # present plural
17: 25, 18: 25, 19: 25, 23: 25, # past singular
20: 23, 21: 23, 22: 23, # past plural
9: 16, 10: 16, 11: 16, 15: 16, # present singular negated
12: 15, 13: 15, 14: 15, # present plural negated
26: 33, 27: 33, 28: 33, # past singular negated
29: 32, 30: 32, 31: 32, 32: 33 # past plural negated
})
def load(self):
_Verbs.load(self)
self._inverse["was"] = "zijn" # Instead of "wassen".
self._inverse["waren"] = "zijn"
self._inverse["zagen"] = "zien"
self._inverse["wist"] = "weten"
def find_lemma(self, verb):
""" Returns the base form of the given inflected verb, using a rule-based approach.
This is problematic if a verb ending in -e is given in the past tense or gerund.
"""
v = verb.lower()
# Common prefixes: op-bouwen and ver-bouwen inflect like bouwen.
for prefix in ("aan", "be", "her", "in", "mee", "ont", "op", "over", "uit", "ver"):
if v.startswith(prefix) and v[len(prefix):] in self.inflections:
return prefix + self.inflections[v[len(prefix):]]
# Present participle -end: hengelend, knippend.
if v.endswith("end"):
b = v[:-3]
# Past singular -de or -te: hengelde, knipte.
elif v.endswith(("de", "det", "te", "tet")):
b = v[:-2]
# Past plural -den or -ten: hengelden, knipten.
elif v.endswith(("chten"),):
b = v[:-2]
elif v.endswith(("den", "ten")) and len(v) > 3 and is_vowel(v[-4]):
b = v[:-2]
elif v.endswith(("den", "ten")):
b = v[:-3]
# Past participle ge- and -d or -t: gehengeld, geknipt.
elif v.endswith(("d","t")) and v.startswith("ge"):
b = v[2:-1]
# Present 2nd or 3rd singular: wordt, denkt, snakt, wacht.
elif v.endswith(("cht"),):
b = v
elif v.endswith(("dt", "bt", "gt", "kt", "mt", "pt", "wt", "xt", "aait", "ooit")):
b = v[:-1]
elif v.endswith("t") and len(v) > 2 and not is_vowel(v[-2]):
b = v[:-1]
elif v.endswith("en") and len(v) > 3:
return v
else:
b = v
# hengel => hengelen (and not hengellen)
if len(b) > 2 and b.endswith(("el", "nder", "om", "tter")) and not is_vowel(b[-3]):
pass
# Long vowel followed by -f or -s: geef => geven.
elif len(b) > 2 and not is_vowel(b[-1]) and is_vowel(b[-2]) and is_vowel(b[-3])\
or b.endswith(("ijf", "erf"),):
if b.endswith("f"): b = b[:-1] + "v"
if b.endswith("s"): b = b[:-1] + "z"
if b[-2] == b[-3]:
b = b[:-2] + b[-1]
# Short vowel followed by consonant: snak => snakken.
elif len(b) > 1 and not is_vowel(b[-1]) and is_vowel(b[-2]) and not b.endswith(("er","ig")):
b = b + b[-1]
b = b + "en"
b = b.replace("vven", "ven") # omgevven => omgeven
b = b.replace("zzen", "zen") # genezzen => genezen
b = b.replace("aen", "aan") # doorgaen => doorgaan
return b
def find_lexeme(self, verb):
""" For a regular verb (base form), returns the forms using a rule-based approach.
"""
v = verb.lower()
# Stem = infinitive minus -en.
b = b0 = re.sub("en$", "", v)
# zweven => zweef, graven => graaf
if b.endswith("v"): b = b[:-1] + "f"
if b.endswith("z"): b = b[:-1] + "s"
# Vowels with a long sound are doubled, we need to guess how it sounds:
if len(b) > 2 and not is_vowel(b[-1]) and is_vowel(b[-2]) and not is_vowel(b[-3]):
if not v.endswith(("elen", "deren", "keren", "nderen", "tteren")):
b = b[:-1] + b[-2] + b[-1]
# pakk => pak
if len(b) > 1 and not is_vowel(b[-1]) and b[-1] == b[-2]:
b = b[:-1]
# Present tense gets -t:
sg = not b.endswith("t") and b + "t" or b
# Past tense ending in a consonant in "xtc-koffieshop" gets -t, otherwise -d:
dt = b0 and b0[-1] in "xtckfshp" and "t" or (not b.endswith("d") and "d" or "")
# Past tense -e and handle common irregular inflections:
p = b + dt + "e"
for suffix, irregular in (("erfde", "ierf"), ("ijfde", "eef"), ("ingde", "ong"), ("inkte", "onk")):
if p.endswith(suffix):
p = p[:-len(suffix)] + irregular; break
# Past participle: ge-:
pp = re.sub("tt$", "t", "ge" + b + dt)
pp = pp.startswith(("geop", "gein", "geaf")) and pp[2:4]+"ge"+pp[4:] or pp # geopstart => opgestart
pp = pp.startswith(("gever", "gebe", "gege")) and pp[2:] or pp
return [v, b, sg, sg, v, b0+"end", p, p, p, b+dt+"en", p, pp]
verbs = Verbs()
conjugate, lemma, lexeme, tenses = \
verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
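# Illustrative examples (not part of the original module) of the rule-based
# fallbacks defined above; the public conjugate()/lemma()/lexeme() wrappers
# consult nl-verbs.txt first and only fall back to these rules for verbs not
# in the lexicon:
#   verbs.find_lemma("knipte") == "knippen" # past singular -te => infinitive
#   verbs.find_lemma("hengelde") == "hengelen" # -el stem is not doubled
#   verbs.find_lexeme("knippen")[-1] == "geknipt" # past participle ge- + -t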
#### ATTRIBUTIVE & PREDICATIVE #####################################################################
adjective_attributive = {
"civiel": "civiele",
"complex": "complexe",
"enkel": "enkele",
"grof": "grove",
"half": "halve",
"luttel": "luttele",
"mobiel": "mobiele",
"parijs": "parijse",
"ruw": "ruwe",
"simpel": "simpele",
"stabiel": "stabiele",
"steriel": "steriele",
"subtiel": "subtiele",
"teer": "tere"
}
def attributive(adjective):
""" For a predicative adjective, returns the attributive form (lowercase).
In Dutch, the attributive is formed with -e: "fel" => "felle kritiek".
"""
w = adjective.lower()
if w in adjective_attributive:
return adjective_attributive[w]
if w.endswith("e"):
return w
if w.endswith(("er","st")) and len(w) > 4:
return w + "e"
if w.endswith("ees"):
return w[:-2] + w[-1] + "e"
if w.endswith("el") and len(w) > 2 and not is_vowel(w[-3]):
return w + "e"
if w.endswith("ig"):
return w + "e"
if len(w) > 2 and (not is_vowel(w[-1]) and is_vowel(w[-2]) and is_vowel(w[-3]) or w[:-1].endswith("ij")):
if w.endswith("f"): w = w[:-1] + "v"
if w.endswith("s"): w = w[:-1] + "z"
if w[-2] == w[-3]:
w = w[:-2] + w[-1]
elif len(w) > 1 and is_vowel(w[-2]) and w.endswith(tuple("bdfgklmnprst")):
w = w + w[-1]
return w + "e"
adjective_predicative = dict((v,k) for k,v in adjective_attributive.iteritems())
adjective_predicative.update({
"moe": "moe",
"taboe": "taboe",
"voldoende": "voldoende"
})
def predicative(adjective):
""" Returns the predicative adjective (lowercase).
In Dutch, the attributive form preceding a noun is common:
"rake opmerking" => "raak", "straffe uitspraak" => "straf", "dwaze blik" => "dwaas".
"""
w = adjective.lower()
if w in adjective_predicative:
return adjective_predicative[w]
if w.endswith("ste"):
return w[:-1]
if w.endswith("ere"):
return w[:-1]
if w.endswith("bele"):
return w[:-1]
if w.endswith("le") and len(w) > 2 and is_vowel(w[-3]) and not w.endswith(("eule", "oele")):
return w[:-2] + w[-3] + "l"
if w.endswith("ve") and len(w) > 2 and is_vowel(w[-3]) and not w.endswith(("euve", "oeve", "ieve")):
return w[:-2] + w[-3] + "f"
if w.endswith("ze") and len(w) > 2 and is_vowel(w[-3]) and not w.endswith(("euze", "oeze", "ieze")):
return w[:-2] + w[-3] + "s"
if w.endswith("ve"):
return w[:-2] + "f"
if w.endswith("ze"):
return w[:-2] + "s"
if w.endswith("e") and len(w) > 2:
if not is_vowel(w[-2]) and w[-2] == w[-3]:
return w[:-2]
if len(w) > 3 and not is_vowel(w[-2]) and is_vowel(w[-3]) and w[-3] != "i" and not is_vowel(w[-4]):
return w[:-2] + w[-3] + w[-2]
return w[:-1]
return w
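# Illustrative examples (not part of the original module):
#   attributive("half") == "halve" # irregular table
#   attributive("parijs") == "parijse" # irregular table
#   predicative("grove") == "grof" # reverse of the irregular table
#   predicative("rake") == "raak" # docstring example: "rake opmerking" => "raak"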
|
EricSchles/pattern
|
pattern/text/nl/inflect.py
|
Python
|
bsd-3-clause
| 16,402
|
[
"MOE"
] |
5db99193d457dda5adf8b16d3bd69ef3c63485adb15f7c7a448f2fffe8e7df2e
|
__author__ = 'Matteo'
__doc__ = ''''''
N = "\n"
T = "\t"
# N="<br/>"
from Bio import SeqIO
from Bio import Seq
import csv
import math
from Bio.Blast import NCBIXML
def get_genome(fp):
return list(SeqIO.parse(open(fp, "rU"), "genbank"))
def genelooper(genome):
return [gene for chr in genome for gene in chr.features if gene.type =="CDS"]
def depr_joiner(chr, a,b,c,reversed=False,to_stop=False): #this assumes that the locations of the genes are set right.
if reversed:
fore=chr[a:b-1-((c-b+1) % 3)].seq
print((c-b+1) % 3 +1)
aft=chr[b:c].seq
new=fore+aft
return new.reverse_complement().translate(to_stop=to_stop)
else:
fore=chr[a:b+((a-b+1) % 3)].seq
aft=chr[b:c].seq
new=fore+aft
return new.translate(to_stop=to_stop)
def joiner1(chr, a,b,c,reversed=False,to_stop=True): #junked
prot=[]
target=(c-a+1)/3
if reversed:
prot.append(str(chr[a:c].seq.reverse_complement().translate(to_stop=to_stop)))
for n in range(60):
new=chr[a:b].seq+chr[b-n:c].seq
prot.append(str(new.reverse_complement().translate(to_stop=to_stop)))
new=chr[a:b-n].seq+chr[b:c].seq
prot.append(str(new.reverse_complement().translate(to_stop=to_stop)))
else:
prot.append(str(chr[a:c].seq.translate(to_stop=to_stop)))
for n in range(60):
new=chr[a:b-n].seq+chr[b:c].seq
prot.append(str(new.translate(to_stop=to_stop)))
new=chr[a:b].seq+chr[b-n:c].seq
prot.append(str(new.translate(to_stop=to_stop)))
p2=sorted(prot,key=lambda p: len(p))
print(target,len(p2[-1]))
#print(N.join(p2))
return p2[-1]
def joiner(chr, a,b,c,reversed=False,to_stop=True): #shoddy
target=(c-a+1)/3
if reversed:
trans=chr[a:c].seq.reverse_complement().translate(to_stop=to_stop)
print(len(trans),trans)
n=1
while len(trans)< target-2-(n/1.5):
seqs=[]
print(len(trans),target-2-(n/1.5))
new=chr[a:b].seq+chr[b-n:c].seq
seqs.append(new.reverse_complement().translate(to_stop=to_stop))
new=chr[a:b-n].seq+chr[b:c].seq
seqs.append(new.reverse_complement().translate(to_stop=to_stop))
new=chr[a:b-n].seq+chr[b-n:c].seq
seqs.append(new.reverse_complement().translate(to_stop=to_stop))
trans=max(seqs,key=lambda s:len(s))
n+=1
else:
prot.append(str(chr[a:c].seq.translate(to_stop=to_stop)))
for n in range(60):
new=chr[a:b-n].seq+chr[b:c].seq
prot.append(str(new.translate(to_stop=to_stop)))
new=chr[a:b].seq+chr[b-n:c].seq
prot.append(str(new.translate(to_stop=to_stop)))
p2=sorted(prot,key=lambda p: len(p))
print(target,len(p2[-1]))
#print(N.join(p2))
return p2[-1]
def make_slipcandidates(genome):
fasta=open('slip_candidates.fa','w')
threshold=50
for chr in genome:
previous=None
for gene in chr.features:
if gene.type=='CDS':
if previous:
if gene.location.strand==previous.location.strand and (gene.qualifiers['product'][0]==previous.qualifiers['product'][0] or gene.location.start-previous.location.end<threshold):
if gene.location.strand<0:
gene.qualifiers['locus_tag'][0]=gene.qualifiers['locus_tag'][0]+'-'+previous.qualifiers['locus_tag'][0]
gene.qualifiers['translation'][0]=gene.qualifiers['translation'][0]+previous.qualifiers['translation'][0]
else:
gene.qualifiers['locus_tag'][0]=previous.qualifiers['locus_tag'][0]+'-'+gene.qualifiers['locus_tag'][0]
gene.qualifiers['translation'][0]=previous.qualifiers['translation'][0]+gene.qualifiers['translation'][0]
else:
fasta.write('>'+previous.qualifiers['locus_tag'][0]+N+previous.qualifiers['translation'][0]+N)
previous=gene
def locus2prod(genome,infile,outfile):
text=open(infile,'r').read()
text=text.replace('-','---')
for gene in genelooper(genome):
text=text.replace(gene.qualifiers['locus_tag'][0],gene.qualifiers['product'][0])
open(outfile,'w').write(text)
def get_prot(infile,outfile):
__doc__='''Parse the blast results of prot vs. prot and give a table '''
def picker(rec):
if rec.alignments:
hit=rec.alignments[0].hsps[0]
mut=[]
mut2=""
for i,m in enumerate(hit.match):
if m==" " or m=="+":
if i==0:
mut2=hit.sbjct[i]+str(i+int(hit.sbjct_start))+hit.query[i]
else:
mut.append(hit.sbjct[i]+str(i+int(hit.sbjct_start))+hit.query[i])
top={"qid": rec.query,"sid":rec.alignments[0].hit_def,"len":rec.query_length,
"qst":hit.query_start,"qnd":hit.query_end,"sst":hit.sbjct_start,"snd":hit.sbjct_end,"ssq":hit.sbjct,"qsq":hit.query,"m":hit.match,"mut":mut,"V1M":mut2,"nm":len(mut)}
else:
top={"qid": rec.query,"sid":"#UNMATCHED","len":rec.query_length,
"qst":0,"qnd":0,"sst":0,"snd":0,"ssq":"","qsq":"","m":"","nm":"","V1M":""}
return top
w=csv.DictWriter(open(outfile,"w"),"qid sid qlen qst qnd sst snd qsq ssq m mut V1M nm".split())
w.writeheader()
for rec in NCBIXML.parse(open(infile)):
w.writerow(picker(rec))
if __name__ == "__main__":
genome=get_genome("Gthg_TM242_v3.0.gb")
#make_slipcandidates(genome)
#get_prot("enriched.xml","prot_cons_fusion.csv")
locus2prod(genome,'mutanda.txt','mutata.txt')
|
matteoferla/Geobacillus
|
fusionmatch.py
|
Python
|
gpl-2.0
| 5,821
|
[
"BLAST"
] |
31ee510c1402cab7eeda56da56f9472c861038f5e5e0ea73ff6456c2d91a5ef9
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""check for signs of poor design"""
from astroid import Function, If, InferenceError
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
import re
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*')
def class_is_abstract(klass):
"""return true if the given class node should be considered as an abstract
class
"""
for attr in klass.values():
if isinstance(attr, Function):
if attr.is_abstract(pass_is_abstract=False):
return True
return False
MSGS = {
'R0901': ('Too many ancestors (%s/%s)',
'too-many-ancestors',
'Used when class has too many parent classes, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0902': ('Too many instance attributes (%s/%s)',
'too-many-instance-attributes',
'Used when class has too many instance attributes, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0903': ('Too few public methods (%s/%s)',
'too-few-public-methods',
'Used when class has too few public methods, so be sure it\'s \
really worth it.'),
'R0904': ('Too many public methods (%s/%s)',
'too-many-public-methods',
'Used when class has too many public methods, try to reduce \
this to get a simpler (and so easier to use) class.'),
'R0911': ('Too many return statements (%s/%s)',
'too-many-return-statements',
'Used when a function or method has too many return statement, \
making it hard to follow.'),
'R0912': ('Too many branches (%s/%s)',
'too-many-branches',
'Used when a function or method has too many branches, \
making it hard to follow.'),
'R0913': ('Too many arguments (%s/%s)',
'too-many-arguments',
'Used when a function or method takes too many arguments.'),
'R0914': ('Too many local variables (%s/%s)',
'too-many-locals',
'Used when a function or method has too many local variables.'),
'R0915': ('Too many statements (%s/%s)',
'too-many-statements',
'Used when a function or method has too many statements. You \
should then split it in smaller functions / methods.'),
'R0921': ('Abstract class not referenced',
'abstract-class-not-used',
'Used when an abstract class is not used as ancestor anywhere.'),
'R0922': ('Abstract class is only referenced %s times',
'abstract-class-little-used',
'Used when an abstract class is used less than X times as \
ancestor.'),
'R0923': ('Interface not implemented',
'interface-not-implemented',
'Used when an interface class is not implemented anywhere.'),
}
class MisdesignChecker(BaseChecker):
"""checks for sign of poor/misdesign:
* number of methods, attributes, local variables...
* size, complexity of functions, methods
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'design'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('max-args',
{'default' : 5, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of arguments for function / method'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Default to name with leading underscore'}
),
('max-locals',
{'default' : 15, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of locals for function / method body'}
),
('max-returns',
{'default' : 6, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of return / yield for function / '
'method body'}
),
('max-branches',
{'default' : 12, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of branch for function / method body'}
),
('max-statements',
{'default' : 50, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of statements in function / method '
'body'}
),
('max-parents',
{'default' : 7,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of parents for a class (see R0901).'}
),
('max-attributes',
{'default' : 7,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of attributes for a class \
(see R0902).'}
),
('min-public-methods',
{'default' : 2,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Minimum number of public methods for a class \
(see R0903).'}
),
('max-public-methods',
{'default' : 20,
'type' : 'int',
'metavar' : '<num>',
'help' : 'Maximum number of public methods for a class \
(see R0904).'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self._returns = None
self._branches = None
self._used_abstracts = None
self._used_ifaces = None
self._abstracts = None
self._ifaces = None
self._stmts = 0
def open(self):
"""initialize visit variables"""
self.stats = self.linter.add_stats()
self._returns = []
self._branches = []
self._used_abstracts = {}
self._used_ifaces = {}
self._abstracts = []
self._ifaces = []
# Check 'R0921', 'R0922', 'R0923'
def close(self):
"""check that abstract/interface classes are used"""
for abstract in self._abstracts:
if not abstract in self._used_abstracts:
self.add_message('R0921', node=abstract)
elif self._used_abstracts[abstract] < 2:
self.add_message('R0922', node=abstract,
args=self._used_abstracts[abstract])
for iface in self._ifaces:
if not iface in self._used_ifaces:
self.add_message('R0923', node=iface)
@check_messages('R0901', 'R0902', 'R0903', 'R0904', 'R0921', 'R0922', 'R0923')
def visit_class(self, node):
"""check size of inheritance hierarchy and number of instance attributes
"""
self._inc_branch()
# Is the total inheritance hierarchy 7 or less?
nb_parents = len(list(node.ancestors()))
if nb_parents > self.config.max_parents:
self.add_message('R0901', node=node,
args=(nb_parents, self.config.max_parents))
# Does the class contain less than 20 attributes for
# non-GUI classes (40 for GUI)?
# FIXME detect gui classes
if len(node.instance_attrs) > self.config.max_attributes:
self.add_message('R0902', node=node,
args=(len(node.instance_attrs),
self.config.max_attributes))
# update abstract / interface classes structures
if class_is_abstract(node):
self._abstracts.append(node)
elif node.type == 'interface' and node.name != 'Interface':
self._ifaces.append(node)
for parent in node.ancestors(False):
if parent.name == 'Interface':
continue
self._used_ifaces[parent] = 1
try:
for iface in node.interfaces():
self._used_ifaces[iface] = 1
except InferenceError:
# XXX log ?
pass
for parent in node.ancestors():
try:
self._used_abstracts[parent] += 1
except KeyError:
self._used_abstracts[parent] = 1
@check_messages('R0901', 'R0902', 'R0903', 'R0904', 'R0921', 'R0922', 'R0923')
def leave_class(self, node):
"""check number of public methods"""
nb_public_methods = 0
special_methods = set()
for method in node.methods():
if not method.name.startswith('_'):
nb_public_methods += 1
if method.name.startswith("__"):
special_methods.add(method.name)
# Does the class contain less than 20 public methods ?
if nb_public_methods > self.config.max_public_methods:
self.add_message('R0904', node=node,
args=(nb_public_methods,
self.config.max_public_methods))
# stop here for exception, metaclass and interface classes
if node.type != 'class':
return
# Does the class contain at least the minimum number of public methods?
if nb_public_methods < self.config.min_public_methods:
self.add_message('R0903', node=node,
args=(nb_public_methods,
self.config.min_public_methods))
@check_messages('R0911', 'R0912', 'R0913', 'R0914', 'R0915')
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self._inc_branch()
# init branch and returns counters
self._returns.append(0)
self._branches.append(0)
# check number of arguments
args = node.args.args
if args is not None:
ignored_args_num = len(
[arg for arg in args
if self.config.ignored_argument_names.match(arg.name)])
argnum = len(args) - ignored_args_num
if argnum > self.config.max_args:
self.add_message('R0913', node=node,
args=(len(args), self.config.max_args))
else:
ignored_args_num = 0
# check number of local variables
locnum = len(node.locals) - ignored_args_num
if locnum > self.config.max_locals:
self.add_message('R0914', node=node,
args=(locnum, self.config.max_locals))
# init statements counter
self._stmts = 1
@check_messages('R0911', 'R0912', 'R0913', 'R0914', 'R0915')
def leave_function(self, node):
"""most of the work is done here on close:
checks for max returns, branch, return in __init__
"""
returns = self._returns.pop()
if returns > self.config.max_returns:
self.add_message('R0911', node=node,
args=(returns, self.config.max_returns))
branches = self._branches.pop()
if branches > self.config.max_branches:
self.add_message('R0912', node=node,
args=(branches, self.config.max_branches))
# check number of statements
if self._stmts > self.config.max_statements:
self.add_message('R0915', node=node,
args=(self._stmts, self.config.max_statements))
def visit_return(self, _):
"""count number of returns"""
if not self._returns:
return # return outside function, reported by the base checker
self._returns[-1] += 1
def visit_default(self, node):
"""default visit method -> increments the statements counter if
necessary
"""
if node.is_statement:
self._stmts += 1
def visit_tryexcept(self, node):
"""increments the branches counter"""
branches = len(node.handlers)
if node.orelse:
branches += 1
self._inc_branch(branches)
self._stmts += branches
def visit_tryfinally(self, _):
"""increments the branches counter"""
self._inc_branch(2)
self._stmts += 2
def visit_if(self, node):
"""increments the branches counter"""
branches = 1
# don't double count If nodes coming from some 'elif'
if node.orelse and (len(node.orelse) > 1 or
not isinstance(node.orelse[0], If)):
branches += 1
self._inc_branch(branches)
self._stmts += branches
def visit_while(self, node):
"""increments the branches counter"""
branches = 1
if node.orelse:
branches += 1
self._inc_branch(branches)
visit_for = visit_while
def _inc_branch(self, branchesnum=1):
"""increments the branches counter"""
branches = self._branches
for i in xrange(len(branches)):
branches[i] += branchesnum
# FIXME: make a nice report...
def register(linter):
"""required method to auto register this checker """
linter.register_checker(MisdesignChecker(linter))
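# Illustrative sketch (not part of the original checker): with the default
# configuration above (max-args=5, ignored-argument-names=_.*), the first
# function below would be flagged with R0913 (too-many-arguments) while the
# second would not, because underscore-prefixed names are excluded from the
# argument count:
#
#   def first(a, b, c, d, e, f): # 6 counted arguments -> R0913
#       return a
#
#   def second(a, b, c, d, e, _cache): # 5 counted arguments -> no message
#       return a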
|
godfryd/pylint
|
checkers/design_analysis.py
|
Python
|
gpl-2.0
| 14,359
|
[
"VisIt"
] |
1ba9760ed67276227aec49a6bbe770b156d205e0d6d45bbca557dffcf6520202
|
## \file
## \ingroup tutorial_roofit
## \notebook -nodraw
##
## \brief Likelihood and minimization: fitting with constraints
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
from __future__ import print_function
import ROOT
# Create model and dataset
# ----------------------------------------------
# Construct a Gaussian p.d.f
x = ROOT.RooRealVar("x", "x", -10, 10)
m = ROOT.RooRealVar("m", "m", 0, -10, 10)
s = ROOT.RooRealVar("s", "s", 2, 0.1, 10)
gauss = ROOT.RooGaussian("gauss", "gauss(x,m,s)", x, m, s)
# Construct a flat p.d.f (polynomial of 0th order)
poly = ROOT.RooPolynomial("poly", "poly(x)", x)
# model = f*gauss + (1-f)*poly
f = ROOT.RooRealVar("f", "f", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model",
"model",
ROOT.RooArgList(
gauss,
poly),
ROOT.RooArgList(f))
# Generate small dataset for use in fitting below
d = model.generate(ROOT.RooArgSet(x), 50)
# Create constraint pdf
# -----------------------------------------
# Construct Gaussian constraint p.d.f on parameter f at 0.8 with
# resolution of 0.1
fconstraint = ROOT.RooGaussian(
"fconstraint",
"fconstraint",
f,
ROOT.RooFit.RooConst(0.8),
ROOT.RooFit.RooConst(0.1))
# Method 1 - add internal constraint to model
# -------------------------------------------------------------------------------------
# Multiply constraint term with regular p.d.f using ROOT.RooProdPdf
# Specify in fitTo() that internal constraints on parameter f should be
# used
# Multiply constraint with p.d.f
modelc = ROOT.RooProdPdf(
"modelc", "model with constraint", ROOT.RooArgList(model, fconstraint))
# Fit model (without use of constraint term)
r1 = model.fitTo(d, ROOT.RooFit.Save())
# Fit modelc with constraint term on parameter f
r2 = modelc.fitTo(
d,
ROOT.RooFit.Constrain(
ROOT.RooArgSet(f)),
ROOT.RooFit.Save())
# Method 2 - specify external constraint when fitting
# ------------------------------------------------------------------------------------------
# Construct another Gaussian constraint p.d.f on parameter f at 0.8 with
# resolution of 0.1
fconstext = ROOT.RooGaussian("fconstext", "fconstext", f, ROOT.RooFit.RooConst(
0.2), ROOT.RooFit.RooConst(0.1))
# Fit with external constraint
r3 = model.fitTo(d, ROOT.RooFit.ExternalConstraints(
ROOT.RooArgSet(fconstext)), ROOT.RooFit.Save())
# Print the fit results
print("fit result without constraint (data generated at f=0.5)")
r1.Print("v")
print("fit result with internal constraint (data generated at f=0.5, is f=0.8+/-0.2)")
r2.Print("v")
print("fit result with (another) external constraint (data generated at f=0.5, is f=0.2+/-0.1)")
r3.Print("v")
|
karies/root
|
tutorials/roofit/rf604_constraints.py
|
Python
|
lgpl-2.1
| 2,727
|
[
"Gaussian"
] |
1105e1261a78ac09e2029a732c97c8d79142f3503e6fa7a3cbcd3cd6d329dea3
|
"""Scatterplots."""
import pandas as pd
import matplotlib.pyplot as plt
from util import get_data, plot_data, compute_daily_returns
import numpy as np
def test_run():
# Read data
dates = pd.date_range('2009-01-01', '2012-12-31') # date range as index
symbols = ['SPY','XOM','GLD']
df = get_data(symbols, dates) # get data for each symbol
plot_data(df)
# Compute daily returns
daily_returns = compute_daily_returns(df)
plot_data(daily_returns, title = "Daily returns", ylabel = "Daily returns")
# Scatterplots SPY versus XOM
daily_returns.plot(kind = 'scatter', x = 'SPY', y = 'XOM')
beta_XOM, alpha_XOM = np.polyfit(daily_returns['SPY'], daily_returns['XOM'], 1)
print "beta_XOM=", beta_XOM
print "alpha_XOM=", alpha_XOM
plt.plot(daily_returns['SPY'], beta_XOM * daily_returns['SPY'] + alpha_XOM, '-', color = 'r')
plt.show()
# Scatterplots SPY versus GLD
daily_returns.plot(kind = 'scatter', x = 'SPY', y = 'GLD')
beta_GLD, alpha_GLD = np.polyfit(daily_returns['SPY'], daily_returns['GLD'], 1)
print "beta_GLD=", beta_GLD
print "alpha_GLD=", alpha_GLD
plt.plot(daily_returns['SPY'], beta_GLD * daily_returns['SPY'] + alpha_GLD, '-', color = 'r')
plt.show()
# Comment: beta_XOM is noticeably higher than beta_GLD, so XOM is more reactive
# to the market than GLD.
# On the other hand, the alpha values denote how well each product performs
# relative to SPY. In this case, alpha_XOM is negative and alpha_GLD is
# positive, which means that GLD performs better.
# Calculate correlation coefficient
print daily_returns.corr(method = 'pearson')
# As you have seen in this lesson, the distribution of daily returns for
# stocks and the market look very similar to a Gaussian.
# This property persists when we look at weekly, monthly, and annual returns
# as well.
# If they were really Gaussian we'd say the returns were normally distributed.
# In many cases in financial research we assume the returns are normally distributed.
# But this can be dangerous because it ignores kurtosis or the probability
# in the tails.
# In the early 2000s investment banks built bonds based on mortgages.
# They assumed that the distribution of returns for these mortgages was
# normally distributed.
# On that basis they were able to show that these bonds had a very low probability of default.
# But they made two mistakes. First, they assumed that the return of each
# of these mortgages was independent; and second, that this return would be
# normally distributed.
# Both of these assumptions proved to be wrong, as a massive number of homeowners
# defaulted on their mortgages.
# It was these defaults that precipitated the great recession of 2008.
#
if __name__ == "__main__":
test_run()
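# Illustrative sketch (not part of the original script): the same
# np.polyfit-based beta/alpha estimate on synthetic daily returns, to make
# the interpretation in the comments above concrete. The names and numbers
# are made up for the example.
#
#   rng = np.random.RandomState(0)
#   returns_spy = rng.normal(0.0, 0.01, 1000)
#   returns_xyz = 1.5 * returns_spy + 0.001 + rng.normal(0.0, 0.002, 1000)
#   beta_xyz, alpha_xyz = np.polyfit(returns_spy, returns_xyz, 1)
#   # beta_xyz ~= 1.5 (sensitivity to the market), alpha_xyz ~= 0.001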
|
bluemurder/mlfl
|
udacity course code/01-06-scatterplots.py
|
Python
|
mit
| 2,878
|
[
"Gaussian"
] |
e78aabdd0369bcd3410ae02d4f6556f2253811dab58ea9d18794471b53a16502
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
test_fsed.py
(c) Will Roberts 12 December, 2015
Test the ``fsed`` module.
'''
from __future__ import absolute_import, print_function, unicode_literals
from .. import ahocorasick
from .. import fsed
from click.testing import CliRunner
from fsed.compat import PY3
from os import path
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import gzip
import os
import tempfile
import unittest
HERE = path.abspath(path.dirname(__file__))
class CMStringIO(StringIO):
'''StringIO object with context manager.'''
def __init__(self, *args, **kwargs):
'''Ctor.'''
StringIO.__init__(self, *args, **kwargs)
def __enter__(self):
self.seek(0)
return self
def __exit__(self, _type, _value, _tb):
pass
def click_command_runner(cli, args=None):
fhandle, tempfile_path = tempfile.mkstemp()
os.close(fhandle)
if args:
args = [(tempfile_path if x == '%t' else x) for x in args]
exit_code = output = result = None
try:
runner = CliRunner()
result = runner.invoke(cli, args)
exit_code = result.exit_code
output = result.output
with open(tempfile_path) as input_file:
result = input_file.read()
if not PY3:
result = result.decode('utf-8')
finally:
# NOTE: To retain the tempfile if the test fails, remove
# the try-finally clause.
os.remove(tempfile_path)
return (exit_code, output, result)
PATTERN_TSV = b'''\\bMarco Polo\tMarco_Polo
Kublai Khan\tKublai_Khan
Christopher Columbus\tChristopher_Columbus
and uncle\tand_uncle'''
PATTERN_SED = b'''s/\\bMarco Polo/Marco_Polo/
s/Kublai Khan/Kublai_Khan/
s.Christopher Columbus.Christopher_Columbus.
s/and uncle/and_uncle/'''
INPUT_TEXT = '''and uncle
sand uncle
s and uncle
Kublai Khan
bKublai Khan
Marco Polo
bMarco Polo'''
# without --words
WITHOUT_WORDS_OUTPUT = '''and_uncle
sand_uncle
s and_uncle
Kublai_Khan
bKublai_Khan
Marco_Polo
bMarco Polo'''
# with --words
WITH_WORDS_OUTPUT = '''and_uncle
sand uncle
s and_uncle
Kublai_Khan
bKublai Khan
Marco_Polo
bMarco Polo'''
class TestFsed(unittest.TestCase):
'''
Unit tests for the `fsed` module.
'''
def test_detect_format(self):
'''
Checks the fsed.detect_pattern_format function.
'''
pattern_file = CMStringIO(PATTERN_TSV)
self.assertEqual(fsed.detect_pattern_format(pattern_file, 'utf-8', False),
(True, True))
pattern_file = CMStringIO(PATTERN_SED)
self.assertEqual(fsed.detect_pattern_format(pattern_file, 'utf-8', False),
(False, True))
def test_rewriting_tsv(self):
'''
Tests the fsed.rewrite_str_with_trie function.
'''
pattern_file = CMStringIO(PATTERN_TSV)
trie, boundaries = fsed.build_trie(pattern_file, 'tsv', 'utf-8', False)
self.assertTrue(boundaries)
self.assertEqual(fsed.rewrite_str_with_trie(INPUT_TEXT, trie, boundaries),
WITHOUT_WORDS_OUTPUT)
trie, boundaries = fsed.build_trie(pattern_file, 'tsv', 'utf-8', True)
self.assertTrue(boundaries)
self.assertEqual(fsed.rewrite_str_with_trie(INPUT_TEXT, trie, boundaries),
WITH_WORDS_OUTPUT)
def test_rewriting_sed(self):
'''
Tests the fsed.rewrite_str_with_trie function.
'''
pattern_file = CMStringIO(PATTERN_SED)
trie, boundaries = fsed.build_trie(pattern_file, 'sed', 'utf-8', False)
self.assertTrue('C' in trie.root)
self.assertTrue(boundaries)
self.assertEqual(fsed.rewrite_str_with_trie(INPUT_TEXT, trie, boundaries),
WITHOUT_WORDS_OUTPUT)
trie, boundaries = fsed.build_trie(pattern_file, 'sed', 'utf-8', True)
self.assertTrue(boundaries)
self.assertEqual(fsed.rewrite_str_with_trie(INPUT_TEXT, trie, boundaries),
WITH_WORDS_OUTPUT)
def test_slow(self):
'''
Tests the fsed.rewrite_str_with_trie function with the ``slow``
flag on.
'''
trie = ahocorasick.AhoCorasickTrie()
trie['a'] = '(a)'
trie['ab'] = '(ab)'
trie['bab'] = '(bab)'
trie['bc'] = '(bc)'
trie['bca'] = '(bca)'
trie['c'] = '(c)'
trie['caa'] = '(caa)'
self.assertEqual(fsed.rewrite_str_with_trie('abccab', trie, slow=False),
'(a)(bc)(c)(a)b')
self.assertEqual(fsed.rewrite_str_with_trie('abccab', trie, slow=True),
'(a)(bc)(c)(ab)')
def test_end2end(self):
with gzip.open(path.join(HERE, 'sed-output.utf8.txt.gz')) as input_file:
sed_output = input_file.read().decode('utf-8')
with gzip.open(path.join(HERE, 'perl-output.utf8.txt.gz')) as input_file:
perl_output = input_file.read().decode('utf-8')
exit_code, output, result = click_command_runner(
fsed.main, ['-w',
'-o', '%t',
path.join(HERE, 'fsed-testpats.tsv'),
path.join(HERE, 'fsed-testinput.utf8.txt.gz')])
self.assertEqual(exit_code, 0)
#self.assertEqual(output, '')
self.assertEqual(result, sed_output)
self.assertEqual(result, perl_output)
exit_code, output, result = click_command_runner(
fsed.main, ['-o', '%t',
path.join(HERE, 'fsed-testpats.wb.sed'),
path.join(HERE, 'fsed-testinput.utf8.txt.gz')])
self.assertEqual(exit_code, 0)
#self.assertEqual(output, '')
self.assertEqual(result, sed_output)
self.assertEqual(result, perl_output)
if __name__ == '__main__':
unittest.main()
|
wroberts/fsed
|
fsed/tests/test_fsed.py
|
Python
|
mit
| 5,901
|
[
"COLUMBUS"
] |
a67bddb42ad7776d5493f7ce6299b936365eee9d7616abdad8d0b2f6f0bddbaf
|
""" Class that contains client access to the StorageManagerDB handler.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import random
import errno
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.Core.Utilities.DErrno import cmpError
from DIRAC.Core.Utilities.Proxy import UserProxy
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Storage.StorageElement import StorageElement
def getFilesToStage(lfnList, jobState=None, checkOnlyTapeSEs=None, jobLog=None):
"""Utility that returns out of a list of LFNs those files that are offline,
and those for which at least one copy is online
"""
if not lfnList:
return S_OK({"onlineLFNs": [], "offlineLFNs": {}, "failedLFNs": [], "absentLFNs": {}})
dm = DataManager()
if isinstance(lfnList, six.string_types):
lfnList = [lfnList]
lfnListReplicas = dm.getReplicasForJobs(lfnList, getUrl=False)
if not lfnListReplicas["OK"]:
return lfnListReplicas
offlineLFNsDict = {}
onlineLFNs = {}
offlineLFNs = {}
absentLFNs = {}
failedLFNs = set()
if lfnListReplicas["Value"]["Failed"]:
# Check if files are not existing
for lfn, reason in lfnListReplicas["Value"]["Failed"].items():
# FIXME: awful check until FC returns a proper error
if cmpError(reason, errno.ENOENT) or "No such file" in reason:
# The file doesn't exist, job must be Failed
# FIXME: it is not possible to return here an S_ERROR(), return the message only
absentLFNs[lfn] = S_ERROR(errno.ENOENT, "File not in FC")["Message"]
if absentLFNs:
return S_OK(
{
"onlineLFNs": list(onlineLFNs),
"offlineLFNs": offlineLFNsDict,
"failedLFNs": list(failedLFNs),
"absentLFNs": absentLFNs,
}
)
return S_ERROR("Failures in getting replicas")
lfnListReplicas = lfnListReplicas["Value"]["Successful"]
# If a file is reported here at a tape SE, it is not at a disk SE as we use disk in priority
# We shall check all files anyway in order to make sure they exist
seToLFNs = dict()
for lfn, ses in lfnListReplicas.items():
for se in ses:
seToLFNs.setdefault(se, list()).append(lfn)
if seToLFNs:
if jobState:
# Get user name and group from the job state
userName = jobState.getAttribute("Owner")
if not userName["OK"]:
return userName
userName = userName["Value"]
userGroup = jobState.getAttribute("OwnerGroup")
if not userGroup["OK"]:
return userGroup
userGroup = userGroup["Value"]
else:
userName = None
userGroup = None
# Check whether files are Online or Offline, or missing at SE
result = _checkFilesToStage(
seToLFNs,
onlineLFNs,
offlineLFNs,
absentLFNs, # pylint: disable=unexpected-keyword-arg
checkOnlyTapeSEs=checkOnlyTapeSEs,
jobLog=jobLog,
proxyUserName=userName,
proxyUserGroup=userGroup,
executionLock=True,
)
if not result["OK"]:
return result
failedLFNs = set(lfnList) - set(onlineLFNs) - set(offlineLFNs) - set(absentLFNs)
# Get the online SEs
dmsHelper = DMSHelpers()
onlineSEs = set(se for ses in onlineLFNs.values() for se in ses)
onlineSites = set(dmsHelper.getLocalSiteForSE(se).get("Value") for se in onlineSEs) - {None}
for lfn in offlineLFNs:
ses = offlineLFNs[lfn]
if len(ses) == 1:
# No choice, let's go
offlineLFNsDict.setdefault(ses[0], list()).append(lfn)
continue
# Try and get an SE at a site already with online files
found = False
if onlineSites:
# If there is at least one online site, select one
for se in ses:
site = dmsHelper.getLocalSiteForSE(se)
if site["OK"]:
if site["Value"] in onlineSites:
offlineLFNsDict.setdefault(se, list()).append(lfn)
found = True
break
# No online site found in common, select randomly
if not found:
offlineLFNsDict.setdefault(random.choice(ses), list()).append(lfn)
return S_OK(
{
"onlineLFNs": list(onlineLFNs),
"offlineLFNs": offlineLFNsDict,
"failedLFNs": list(failedLFNs),
"absentLFNs": absentLFNs,
"onlineSites": onlineSites,
}
)
def _checkFilesToStage(
seToLFNs,
onlineLFNs,
offlineLFNs,
absentLFNs,
checkOnlyTapeSEs=None,
jobLog=None,
proxyUserName=None,
proxyUserGroup=None,
executionLock=None,
):
"""
Checks on SEs whether the file is NEARLINE or ONLINE
onlineLFNs, offlineLFNs and absentLFNs are modified to contain the files found online
If checkOnlyTapeSEs is True, disk replicas are not checked
As soon as a replica is found Online for a file, no further check is made
"""
# Only check on storage if it is a tape SE
if jobLog is None:
logger = gLogger
else:
logger = jobLog
if checkOnlyTapeSEs is None:
# Default value is True
checkOnlyTapeSEs = True
failed = {}
for se, lfnsInSEList in seToLFNs.items():
# If we have found already all files online at another SE, no need to check the others
# but still we want to set the SE as Online if not a TapeSE
vo = getVOForGroup(proxyUserGroup)
seObj = StorageElement(se, vo=vo)
status = seObj.getStatus()
if not status["OK"]:
return status
tapeSE = status["Value"]["TapeSE"]
diskSE = status["Value"]["DiskSE"]
# If requested to check only Tape SEs and the file is at a diskSE, we guess it is Online...
filesToCheck = []
for lfn in lfnsInSEList:
# If the file had already been found accessible at an SE, only check that this one is on disk
diskIsOK = checkOnlyTapeSEs or (lfn in onlineLFNs)
if diskIsOK and diskSE:
onlineLFNs.setdefault(lfn, []).append(se)
elif not diskIsOK or (tapeSE and (lfn not in onlineLFNs)):
filesToCheck.append(lfn)
if not filesToCheck:
continue
# We have to use a new SE object because it caches the proxy!
with UserProxy(
proxyUserName=proxyUserName, proxyUserGroup=proxyUserGroup, executionLock=executionLock
) as proxyResult:
if proxyResult["OK"]:
fileMetadata = StorageElement(se, vo=vo).getFileMetadata(filesToCheck)
else:
fileMetadata = proxyResult
if not fileMetadata["OK"]:
failed[se] = dict.fromkeys(filesToCheck, fileMetadata["Message"])
else:
if fileMetadata["Value"]["Failed"]:
failed[se] = fileMetadata["Value"]["Failed"]
# is there at least one replica online?
for lfn, mDict in fileMetadata["Value"]["Successful"].items():
# SRM returns Cached, but others may only return Accessible
if mDict.get("Cached", mDict["Accessible"]):
onlineLFNs.setdefault(lfn, []).append(se)
elif tapeSE:
# A file can be staged only at Tape SE
offlineLFNs.setdefault(lfn, []).append(se)
else:
# File not available at a diskSE... we shall retry later
pass
# Doesn't matter if some files are Offline if they are also online
for lfn in set(offlineLFNs) & set(onlineLFNs):
offlineLFNs.pop(lfn)
# If the file was found staged, ignore possible errors, but print out errors
for se, failedLfns in list(failed.items()):
logger.error("Errors when getting files metadata", "at %s" % se)
for lfn, reason in list(failedLfns.items()):
if lfn in onlineLFNs:
logger.warn(reason, "for %s, but there is an online replica" % lfn)
failed[se].pop(lfn)
else:
logger.error(reason, "for %s, no online replicas" % lfn)
if cmpError(reason, errno.ENOENT):
absentLFNs.setdefault(lfn, []).append(se)
failed[se].pop(lfn)
if not failed[se]:
failed.pop(se)
# Find the files that do not exist at SE
if failed:
logger.error(
"Error getting metadata", "for %d files" % len(set(lfn for lfnList in failed.values() for lfn in lfnList))
)
for lfn in absentLFNs:
seList = absentLFNs[lfn]
# FIXME: it is not possible to return here an S_ERROR(), return the message only
absentLFNs[lfn] = S_ERROR(errno.ENOENT, "File not at %s" % ",".join(sorted(seList)))["Message"]
# Format the error for absent files
return S_OK()
@createClient("StorageManagement/StorageManager")
class StorageManagerClient(Client):
"""This is the client to the StorageManager service, so even if it is not seen, it exposes all its RPC calls"""
def __init__(self, **kwargs):
super(StorageManagerClient, self).__init__(**kwargs)
self.setServer("StorageManagement/StorageManager")
|
ic-hep/DIRAC
|
src/DIRAC/StorageManagementSystem/Client/StorageManagerClient.py
|
Python
|
gpl-3.0
| 9,962
|
[
"DIRAC"
] |
9a2368fb10c9c5d9434a38af21ae31c6d31b32e412cd07af737893cecccdba47
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
from django.urls import reverse_lazy
from rest_framework.decorators import api_view
from catmaid.control import (authentication, user, group, log, message, client,
common, project, stack, stackgroup, tile, tracing, stats,
annotation, textlabel, label, link, connector,
neuron, node, treenode, suppressed_virtual_treenode, skeleton,
skeletonexport, treenodeexport, cropping, data_view, ontology,
classification, notifications, roi, clustering, volume, noop,
useranalytics, user_evaluation, search, graphexport, transaction,
graph2, circles, analytics, review, wiringdiagram, object, sampler,
similarity, nat, origin, point, landmarks, pointcloud, pointset)
from catmaid.history import record_request_action as record_view
from catmaid.views import CatmaidView
from catmaid.views.admin import ProjectDeletion
# A regular expression matching floating point and integer numbers
num = r'[-+]?[0-9]*\.?[0-9]+'
integer = r'[-+]?[0-9]+'
# A regular expression matching lists of integers with comma as delimiter
intlist = r'[0-9]+(,[0-9]+)*'
# A regular expression matching a comma-separated list of words
wordlist = r'\w+(,\w+)*'
app_name = 'catmaid'
# Add the main index.html page at the root:
urlpatterns = [
url(r'^$', ensure_csrf_cookie(CatmaidView.as_view(template_name='catmaid/index.html')), name="home"),
url(r'^version$', common.get_catmaid_version),
url(r'^neuroglancer$', ensure_csrf_cookie(CatmaidView.as_view(template_name='catmaid/neuroglancer.html'))),
]
# Additional administration views
urlpatterns += [
url(r'^admin/catmaid/project/delete-with-data$', ProjectDeletion.as_view(),
name="delete-projects-with-data"),
]
# Authentication and permissions
urlpatterns += [
url(r'^accounts/login$', authentication.login_user),
url(r'^accounts/logout$', authentication.logout_user),
url(r'^accounts/(?P<project_id>\d+)/all-usernames$', authentication.all_usernames),
url(r'^permissions$', authentication.user_project_permissions),
url(r'^classinstance/(?P<ci_id>\d+)/permissions$', authentication.get_object_permissions),
url(r'^register$', authentication.register),
]
# Users
urlpatterns += [
url(r'^user-list$', user.user_list),
url(r'^user-table-list$', user.user_list_datatable),
url(r'^user-profile/update$', user.update_user_profile),
url(r'^user/password_change/$', user.NonAnonymousPasswordChangeView.as_view(
success_url=reverse_lazy('catmaid:home'), raise_exception=False)),
]
# Groups
urlpatterns += [
url(r'^groups/$', group.GroupList.as_view())
]
# Log
urlpatterns += [
url(r'^(?P<project_id>\d+)/logs/list$', log.list_logs),
url(r'^log/(?P<level>(info|error|debug))$', log.log_frontent_event),
]
# Transaction history
urlpatterns += [
url(r'^(?P<project_id>\d+)/transactions/$', transaction.transaction_collection),
url(r'^(?P<project_id>\d+)/transactions/location$', transaction.get_location),
]
# Messages
urlpatterns += [
url(r'^messages/list$', message.list_messages),
url(r'^messages/(?P<message_id>\d+)/mark_read$', message.read_message),
url(r'^messages/latestunreaddate', message.get_latest_unread_date),
]
# CATMAID client datastore and data access
urlpatterns += [
url(r'^client/datastores/$', client.ClientDatastoreList.as_view()),
url(r'^client/datastores/(?P<name>[\w-]+)$', client.ClientDatastoreDetail.as_view()),
url(r'^client/datastores/(?P<name>[\w-]+)/$', client.ClientDataList.as_view()),
]
# General project model access
urlpatterns += [
url(r'^projects/$', project.projects),
url(r'^projects/export$', project.export_projects),
url(r'^(?P<project_id>\d+)/interpolatable-sections/$', project.interpolatable_sections),
url(r'^(?P<project_id>\d+)/fork$', project.fork),
]
# General stack model access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stacks$', stack.stacks),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/info$', stack.stack_info),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/groups$', stack.stack_groups),
]
# General stack group access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stackgroup/(?P<stackgroup_id>\d+)/info$', stackgroup.get_stackgroup_info),
]
# Tile access
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tile$', tile.get_tile),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/put_tile$', tile.put_tile),
]
# Tracing general
urlpatterns += [
url(r'^(?P<project_id>\d+)/tracing/setup/rebuild$', tracing.rebuild_tracing_setup_view),
url(r'^(?P<project_id>\d+)/tracing/setup/test$', tracing.check_tracing_setup_view),
url(r'^(?P<project_id>\d+)/tracing/setup/validate$', tracing.validate_tracing_setup),
]
# Reconstruction sampling
urlpatterns += [
url(r'^(?P<project_id>\d+)/samplers/$', sampler.list_samplers),
url(r'^(?P<project_id>\d+)/samplers/add$', sampler.add_sampler),
url(r'^(?P<project_id>\d+)/samplers/domains/types/$', sampler.list_domain_types),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/states/$', sampler.list_interval_states),
url(r'^(?P<project_id>\d+)/samplers/domains/(?P<domain_id>\d+)/details$', sampler.get_domain_details),
url(r'^(?P<project_id>\d+)/samplers/domains/(?P<domain_id>\d+)/intervals/$', sampler.list_domain_intervals),
url(r'^(?P<project_id>\d+)/samplers/domains/(?P<domain_id>\d+)/intervals/add-all$', sampler.add_all_intervals),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/(?P<interval_id>\d+)/details$', sampler.get_interval_details),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/(?P<interval_id>\d+)/set-state$', sampler.set_interval_state),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/$', sampler.SamplerDetail.as_view()),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/delete$', sampler.delete_sampler),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/domains/$', sampler.list_sampler_domains),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/domains/add$', sampler.add_sampler_domain),
url(r'^(?P<project_id>\d+)/samplers/(?P<sampler_id>\d+)/domains/add-all$', sampler.add_multiple_sampler_domains),
url(r'^(?P<project_id>\d+)/samplers/connectors/$', sampler.list_connectors),
url(r'^(?P<project_id>\d+)/samplers/connectors/states/$', sampler.list_connector_states),
url(r'^(?P<project_id>\d+)/samplers/domains/intervals/(?P<interval_id>\d+)/connectors/(?P<connector_id>\d+)/set-state$',
sampler.set_connector_state),
url(r'^(?P<project_id>\d+)/samplers/states/$', sampler.list_sampler_states),
]
# Statistics
urlpatterns += [
url(r'^(?P<project_id>\d+)/stats/cable-length$', stats.stats_cable_length),
url(r'^(?P<project_id>\d+)/stats/nodecount$', stats.stats_nodecount),
url(r'^(?P<project_id>\d+)/stats/editor$', stats.stats_editor),
url(r'^(?P<project_id>\d+)/stats/summary$', stats.stats_summary),
url(r'^(?P<project_id>\d+)/stats/history$', stats.stats_history),
url(r'^(?P<project_id>\d+)/stats/user-history$', stats.stats_user_history),
url(r'^(?P<project_id>\d+)/stats/user-activity$', stats.stats_user_activity),
url(r'^(?P<project_id>\d+)/stats/server$', stats.ServerStats.as_view()),
]
# Annotations
urlpatterns += [
url(r'^(?P<project_id>\d+)/annotations/$', annotation.list_annotations),
url(r'^(?P<project_id>\d+)/annotations/query$', annotation.annotations_for_entities),
url(r'^(?P<project_id>\d+)/annotations/forskeletons$', annotation.annotations_for_skeletons),
url(r'^(?P<project_id>\d+)/annotations/table-list$', annotation.list_annotations_datatable),
url(r'^(?P<project_id>\d+)/annotations/add$', record_view("annotations.add")(annotation.annotate_entities)),
url(r'^(?P<project_id>\d+)/annotations/add-neuron-names$', record_view("annotations.addneuronname")(annotation.add_neuron_name_annotations)),
url(r'^(?P<project_id>\d+)/annotations/remove$', record_view("annotations.remove")(annotation.remove_annotations)),
url(r'^(?P<project_id>\d+)/annotations/(?P<annotation_id>\d+)/remove$', record_view("annotations.remove")(annotation.remove_annotation)),
url(r'^(?P<project_id>\d+)/annotations/query-targets$', annotation.query_annotated_classinstances),
]
# Text labels
urlpatterns += [
url(r'^(?P<project_id>\d+)/textlabel/create$', record_view("textlabels.create")(textlabel.create_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/delete$', record_view("textlabels.delete")(textlabel.delete_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/update$', record_view("textlabels.update")(textlabel.update_textlabel)),
url(r'^(?P<project_id>\d+)/textlabel/all', textlabel.textlabels),
]
# Treenode labels
urlpatterns += [
url(r'^(?P<project_id>\d+)/labels/$', label.labels_all),
url(r'^(?P<project_id>\d+)/labels/detail$', label.labels_all_detail),
url(r'^(?P<project_id>\d+)/labels/stats$', label.get_label_stats),
url(r'^(?P<project_id>\d+)/labels-for-nodes$', label.labels_for_nodes),
url(r'^(?P<project_id>\d+)/labels/(?P<node_type>(treenode|location|connector))/(?P<node_id>\d+)/$', label.labels_for_node),
url(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/update$', record_view("labels.update")(label.label_update)),
url(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/remove$', record_view("labels.remove")(label.remove_label_link)),
url(r'^(?P<project_id>\d+)/label/remove$', record_view("labels.remove_unused")(label.label_remove)),
]
# Links
urlpatterns += [
url(r'^(?P<project_id>\d+)/link/create$', record_view("links.create")(link.create_link)),
url(r'^(?P<project_id>\d+)/link/delete$', record_view("links.remove")(link.delete_link)),
]
# Connector access
urlpatterns += [
url(r'^(?P<project_id>\d+)/connector/create$', record_view("connectors.create")(connector.create_connector)),
url(r'^(?P<project_id>\d+)/connector/delete$', record_view("connectors.remove")(connector.delete_connector)),
url(r'^(?P<project_id>\d+)/connector/list/graphedge$', connector.graphedge_list),
url(r'^(?P<project_id>\d+)/connector/list/one_to_many$', connector.one_to_many_synapses),
url(r'^(?P<project_id>\d+)/connector/list/many_to_many$', connector.many_to_many_synapses),
url(r'^(?P<project_id>\d+)/connector/list/completed$', connector.list_completed),
url(r'^(?P<project_id>\d+)/connector/list/linked-to-nodes$', connector.connectors_from_treenodes),
url(r'^(?P<project_id>\d+)/connector/skeletons$', connector.connector_skeletons),
url(r'^(?P<project_id>\d+)/connector/edgetimes$', connector.connector_associated_edgetimes),
url(r'^(?P<project_id>\d+)/connector/info$', connector.connectors_info),
url(r'^(?P<project_id>\d+)/connectors/$', connector.list_connectors),
url(r'^(?P<project_id>\d+)/connectors/links/$', connector.list_connector_links),
url(r'^(?P<project_id>\d+)/connectors/(?P<connector_id>\d+)/$',
connector.connector_detail),
url(r'^(?P<project_id>\d+)/connectors/user-info$', connector.connector_user_info),
url(r'^(?P<project_id>\d+)/connectors/types/$', connector.connector_types),
url(r'^(?P<project_id>\d+)/connectors/in-bounding-box$', connector.connectors_in_bounding_box),
]
# Neuron access
urlpatterns += [
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/get-all-skeletons$', neuron.get_all_skeletons_of_neuron),
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/give-to-user$', record_view("neurons.give_to_user")(neuron.give_neuron_to_other_user)),
url(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/delete$', record_view("neurons.remove")(neuron.delete_neuron)),
url(r'^(?P<project_id>\d+)/neurons/(?P<neuron_id>\d+)/rename$', record_view("neurons.rename")(neuron.rename_neuron)),
url(r'^(?P<project_id>\d+)/neurons/$', neuron.list_neurons),
url(r'^(?P<project_id>\d+)/neurons/from-models$', neuron.get_neuron_ids_from_models),
]
# Node access
urlpatterns += [
url(r'^(?P<project_id>\d+)/node/(?P<node_id>\d+)/reviewed$', record_view("nodes.add_or_update_review")(node.update_location_reviewer)),
url(r'^(?P<project_id>\d+)/nodes/most-recent$', node.most_recent_treenode),
url(r'^(?P<project_id>\d+)/nodes/location$', node.get_locations),
url(r'^(?P<project_id>\d+)/nodes/nearest$', node.node_nearest),
url(r'^(?P<project_id>\d+)/node/update$', record_view("nodes.update_location")(node.node_update)),
url(r'^(?P<project_id>\d+)/node/list$', node.node_list_tuples),
url(r'^(?P<project_id>\d+)/node/get_location$', node.get_location),
url(r'^(?P<project_id>\d+)/node/user-info$', node.user_info),
url(r'^(?P<project_id>\d+)/nodes/find-labels$', node.find_labels),
url(r'^(?P<project_id>\d+)/nodes/$', api_view(['POST'])(node.node_list_tuples)),
]
# Treenode access
urlpatterns += [
url(r'^(?P<project_id>\d+)/treenode/create$', record_view("treenodes.create")(treenode.create_treenode)),
url(r'^(?P<project_id>\d+)/treenode/insert$', record_view("treenodes.insert")(treenode.insert_treenode)),
url(r'^(?P<project_id>\d+)/treenode/delete$', record_view("treenodes.remove")(treenode.delete_treenode)),
url(r'^(?P<project_id>\d+)/treenodes/compact-detail$', treenode.compact_detail_list),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/info$', treenode.treenode_info),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/compact-detail$', treenode.compact_detail),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/children$', treenode.find_children),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/confidence$', record_view("treenodes.update_confidence")(treenode.update_confidence)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/parent$', record_view("treenodes.update_parent")(treenode.update_parent)),
url(r'^(?P<project_id>\d+)/treenode/(?P<treenode_id>\d+)/radius$', record_view("treenodes.update_radius")(treenode.update_radius)),
url(r'^(?P<project_id>\d+)/treenodes/radius$', record_view("treenodes.update_radius")(treenode.update_radii)),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/previous-branch-or-root$', treenode.find_previous_branchnode_or_root),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/next-branch-or-end$', treenode.find_next_branchnode_or_end),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/importing-user$', treenode.importing_user),
]
# Suppressed virtual treenode access
urlpatterns += [
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/$',
record_view("treenodes.suppress_virtual_node", "POST")(suppressed_virtual_treenode.SuppressedVirtualTreenodeList.as_view())),
url(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/(?P<suppressed_id>\d+)$',
record_view("treenodes.unsuppress_virtual_node", "DELETE")(suppressed_virtual_treenode.SuppressedVirtualTreenodeDetail.as_view())),
]
# General skeleton access
urlpatterns += [
url(r'^(?P<project_id>\d+)/skeletons/$', skeleton.list_skeletons),
url(r'^(?P<project_id>\d+)/skeletons/cable-length$', skeleton.cable_lengths),
url(r'^(?P<project_id>\d+)/skeletons/connectivity-counts$', skeleton.connectivity_counts),
url(r'^(?P<project_id>\d+)/skeletons/completeness$', skeleton.completeness),
url(r'^(?P<project_id>\d+)/skeletons/validity$', skeleton.validity),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/node_count$', skeleton.node_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuronname$', skeleton.neuronname),
url(r'^(?P<project_id>\d+)/skeleton/neuronnames$', skeleton.neuronnames),
url(r'^(?P<project_id>\d+)/skeleton/node/(?P<treenode_id>\d+)/node_count$', skeleton.node_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/review/reset-own$', record_view("skeletons.reset_own_reviews")(skeleton.reset_own_reviewer_ids)),
url(r'^(?P<project_id>\d+)/skeletons/connectivity$', skeleton.skeleton_info_raw),
url(r'^(?P<project_id>\d+)/skeletons/in-bounding-box$', skeleton.skeletons_in_bounding_box),
url(r'^(?P<project_id>\d+)/skeleton/connectivity_matrix$', skeleton.connectivity_matrix),
url(r'^(?P<project_id>\d+)/skeletons/connectivity_matrix/csv$', skeleton.connectivity_matrix_csv),
url(r'^(?P<project_id>\d+)/skeletons/review-status$', skeleton.review_status),
url(r'^(?P<project_id>\d+)/skeletons/from-origin$', skeleton.from_origin),
url(r'^(?P<project_id>\d+)/skeletons/origin$', skeleton.origin_info),
url(r'^(?P<project_id>\d+)/skeletons/import-info$', skeleton.import_info),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/statistics$', skeleton.skeleton_statistics),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/contributor_statistics$', skeleton.contributor_statistics),
url(r'^(?P<project_id>\d+)/skeleton/contributor_statistics_multiple$', skeleton.contributor_statistics_multiple),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/find-labels$', skeleton.find_labels),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/open-leaves$', skeleton.open_leaves),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/root$', skeleton.root_for_skeleton),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/sampler-count$', skeleton.sampler_count),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/cable-length$', skeleton.cable_length),
url(r'^(?P<project_id>\d+)/skeleton/split$', record_view("skeletons.split")(skeleton.split_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/ancestry$', skeleton.skeleton_ancestry),
url(r'^(?P<project_id>\d+)/skeleton/join$', record_view("skeletons.merge")(skeleton.join_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/reroot$', record_view("skeletons.reroot")(skeleton.reroot_skeleton)),
url(r'^(?P<project_id>\d+)/skeletons/sampler-count$', skeleton.list_sampler_count),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/permissions$', skeleton.get_skeleton_permissions),
url(r'^(?P<project_id>\d+)/skeletons/import$', record_view("skeletons.import")(skeleton.import_skeleton)),
url(r'^(?P<project_id>\d+)/skeleton/annotationlist$', skeleton.annotation_list),
url(r'^(?P<project_id>\d+)/skeletons/within-spatial-distance$', skeleton.within_spatial_distance),
url(r'^(?P<project_id>\d+)/skeletons/node-labels$', skeleton.skeletons_by_node_labels),
url(r'^(?P<project_id>\d+)/skeletons/change-history$', skeleton.change_history),
url(r'^(?P<project_id>\d+)/skeletongroup/adjacency_matrix$', skeleton.adjacency_matrix),
url(r'^(?P<project_id>\d+)/skeletongroup/skeletonlist_subgraph', skeleton.skeletonlist_subgraph),
url(r'^(?P<project_id>\d+)/skeletongroup/all_shared_connectors', skeleton.all_shared_connectors),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/origins/$', origin.OriginCollection.as_view()),
]
# Skeleton export
urlpatterns += [
url(r'^(?P<project_id>\d+)/neuroml/neuroml_level3_v181$', skeletonexport.export_neuroml_level3_v181),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/swc$', skeletonexport.skeleton_swc),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/eswc$', skeletonexport.skeleton_eswc),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuroml$', skeletonexport.skeletons_neuroml),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/json$', skeletonexport.skeleton_with_metadata),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/compact-json$', skeletonexport.skeleton_for_3d_viewer),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/nrrd$', nat.r.export_nrrd),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor$', skeletonexport.compact_arbor),
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor-with-minutes$', skeletonexport.compact_arbor_with_minutes),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/review$', skeletonexport.export_review_skeleton),
url(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/reviewed-nodes$', skeletonexport.export_skeleton_reviews),
url(r'^(?P<project_id>\d+)/skeletons/measure$', skeletonexport.measure_skeletons),
url(r'^(?P<project_id>\d+)/skeleton/connectors-by-partner$', skeletonexport.skeleton_connectors_by_partner),
url(r'^(?P<project_id>\d+)/skeletons/partners-by-connector$', skeletonexport.partners_by_connector),
url(r'^(?P<project_id>\d+)/skeletons/connector-polyadicity$', skeletonexport.connector_polyadicity),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/compact-detail$', skeletonexport.compact_skeleton_detail),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/neuroglancer$', skeletonexport.neuroglancer_skeleton),
url(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/node-overview$', skeletonexport.treenode_overview),
url(r'^(?P<project_id>\d+)/skeletons/compact-detail$', skeletonexport.compact_skeleton_detail_many),
# Marked as deprecated, but kept for backwards compatibility
url(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-skeleton$', skeletonexport.compact_skeleton),
]
# Treenode and Connector image stack archive export
urlpatterns += [
url(r'^(?P<project_id>\d+)/connectorarchive/export$', treenodeexport.export_connectors),
url(r'^(?P<project_id>\d+)/treenodearchive/export$', treenodeexport.export_treenodes),
]
# Pointclouds
urlpatterns += [
url(r'^(?P<project_id>\d+)/pointclouds/$', pointcloud.PointCloudList.as_view()),
url(r'^(?P<project_id>\d+)/pointclouds/(?P<pointcloud_id>\d+)/$', pointcloud.PointCloudDetail.as_view()),
url(r'^(?P<project_id>\d+)/pointclouds/(?P<pointcloud_id>\d+)/images/(?P<image_id>\d+)/$', pointcloud.PointCloudImageDetail.as_view()),
]
# Pointsets
urlpatterns += [
url(r'^(?P<project_id>\d+)/pointsets/$', pointset.PointSetList.as_view()),
url(r'^(?P<project_id>\d+)/pointsets/(?P<pointset_id>\d+)/$', pointset.PointSetDetail.as_view()),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/similarity/configs/$', similarity.ConfigurationList.as_view()),
url(r'^(?P<project_id>\d+)/similarity/configs/(?P<config_id>\d+)/$', similarity.ConfigurationDetail.as_view()),
url(r'^(?P<project_id>\d+)/similarity/configs/(?P<config_id>\d+)/recompute$', similarity.recompute_config),
url(r'^(?P<project_id>\d+)/similarity/queries/$', similarity.SimilarityList.as_view()),
url(r'^(?P<project_id>\d+)/similarity/queries/similarity$', similarity.compare_skeletons),
url(r'^(?P<project_id>\d+)/similarity/queries/(?P<similarity_id>\d+)/$', similarity.SimilarityDetail.as_view()),
url(r'^(?P<project_id>\d+)/similarity/queries/(?P<similarity_id>\d+)/recompute$', similarity.recompute_similarity),
url(r'^(?P<project_id>\d+)/similarity/test-setup$', similarity.test_setup),
]
# Cropping
urlpatterns += [
url(r'^(?P<project_id>\d+)/crop', cropping.crop),
url(r'^crop/download/(?P<file_path>.*)/$', cropping.download_crop)
]
# Tagging
urlpatterns += [
url(r'^(?P<project_id>\d+)/tags/list$', project.list_project_tags),
url(r'^(?P<project_id>\d+)/tags/clear$', record_view("projects.clear_tags")(project.update_project_tags)),
url(r'^(?P<project_id>\d+)/tags/(?P<tags>.*)/update$', record_view("projects.update_tags")(project.update_project_tags)),
]
urlpatterns += [
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/list$', stack.list_stack_tags),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/clear$', record_view("stacks.clear_tags")(stack.update_stack_tags)),
url(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/(?P<tags>.*)/update$', record_view("stacks.update_tags")(stack.update_stack_tags)),
]
# Data views
urlpatterns += [
url(r'^dataviews/list$', data_view.get_available_data_views, name='list_dataviews'),
url(r'^dataviews/default$', data_view.get_default_properties, name='default_dataview'),
url(r'^dataviews/(?P<data_view_id>\d+)/$', data_view.get_detail, name='detail_dataview'),
url(r'^dataviews/show/(?P<data_view_id>\d+)$', data_view.get_data_view, name='show_dataview'),
url(r'^dataviews/show/default$', data_view.get_default_data_view, name='show_default_dataview'),
url(r'^dataviews/type/comment$', data_view.get_data_view_type_comment, name='get_dataview_type_comment'),
url(r'^dataviews/type/(?P<data_view_id>\d+)$', data_view.get_data_view_type, name='get_dataview_type'),
]
# Ontologies
urlpatterns += [
url(r'^ontology/knownroots$', ontology.get_known_ontology_roots),
url(r'^(?P<project_id>%s)/ontology/roots/$' % (integer), ontology.get_existing_roots),
url(r'^(?P<project_id>%s)/ontology/list$' % (integer), ontology.list_ontology),
url(r'^(?P<project_id>%s)/ontology/relations$' % (integer), ontology.get_available_relations),
url(r'^(?P<project_id>%s)/ontology/relations/add$' % (integer), record_view("ontologies.add_relation")(ontology.add_relation_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/rename$' % (integer), record_view("ontologies.rename_relation")(ontology.rename_relation)),
url(r'^(?P<project_id>%s)/ontology/relations/remove$' % (integer), record_view("ontologies.remove_relation")(ontology.remove_relation_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/removeall$' % (integer), record_view("ontologies.remove_all_relations")(ontology.remove_all_relations_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/relations/list$' % (integer), ontology.list_available_relations),
url(r'^(?P<project_id>%s)/ontology/classes$' % (integer), ontology.get_available_classes),
url(r'^(?P<project_id>%s)/ontology/classes/add$' % (integer), record_view("ontologies.add_class")(ontology.add_class_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/rename$' % (integer), record_view("ontologies.rename_class")(ontology.rename_class)),
url(r'^(?P<project_id>%s)/ontology/classes/remove$' % (integer), record_view("ontologies.remove_class")(ontology.remove_class_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/removeall$' % (integer), record_view("ontologies.remove_all_classes")(ontology.remove_all_classes_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/classes/list$' % (integer), ontology.list_available_classes),
url(r'^(?P<project_id>%s)/ontology/links/add$' % (integer), record_view("ontologies.add_link")(ontology.add_link_to_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/remove$' % (integer), record_view("ontologies.remove_link")(ontology.remove_link_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/removeselected$' % (integer), record_view("ontologies.remove_link")(ontology.remove_selected_links_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/links/removeall$' % (integer), record_view("ontologies.remove_all_links")(ontology.remove_all_links_from_ontology)),
url(r'^(?P<project_id>%s)/ontology/restrictions/add$' % (integer), record_view("ontologies.add_restriction")(ontology.add_restriction)),
url(r'^(?P<project_id>%s)/ontology/restrictions/remove$' % (integer), record_view("ontologies.remove_restriction")(ontology.remove_restriction)),
url(r'^(?P<project_id>%s)/ontology/restrictions/(?P<restriction>[^/]*)/types$' % (integer), ontology.get_restriction_types),
]
# Classification
urlpatterns += [
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/roots/$',
classification.get_classification_roots),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/setup/test$',
classification.check_classification_setup_view, name='test_classification_setup'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/setup/rebuild$',
record_view("classifications.rebuild_env")(classification.rebuild_classification_setup_view), name='rebuild_classification_setup'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/new$',
record_view("classifications.add_graph")(classification.add_classification_graph), name='add_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/list$',
classification.list_classification_graph, name='list_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/list/(?P<link_id>\d+)$',
classification.list_classification_graph, name='list_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/(?P<link_id>\d+)/remove$',
record_view("classifications.remove_graph")(classification.remove_classification_graph), name='remove_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/instance-operation$',
record_view("classifications.update_graph")(classification.classification_instance_operation), name='classification_instance_operation'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/(?P<link_id>\d+)/autofill$',
record_view("classifications.autofill_graph")(classification.autofill_classification_graph), name='autofill_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/link$',
record_view("classifications.link_graph")(classification.link_classification_graph), name='link_classification_graph'),
url(rf'^(?P<project_id>{integer})/classification/(?P<workspace_pid>{integer})/stack/(?P<stack_id>{integer})/linkroi/(?P<ci_id>{integer})/$',
record_view("classifications.link_roi")(classification.link_roi_to_classification), name='link_roi_to_classification'),
url(rf'^classification/(?P<workspace_pid>{integer})/export$',
classification.export, name='export_classification'),
url(rf'^classification/(?P<workspace_pid>{integer})/export/excludetags/(?P<exclusion_tags>{wordlist})/$',
classification.export, name='export_classification'),
url(rf'^classification/(?P<workspace_pid>{integer})/search$',
classification.search, name='search_classifications'),
url(rf'^classification/(?P<workspace_pid>{integer})/export_ontology$',
classification.export_ontology, name='export_ontology'),
]
# Notifications
urlpatterns += [
url(r'^(?P<project_id>\d+)/notifications/list$', notifications.list_notifications),
url(r'^(?P<project_id>\d+)/changerequest/approve$', record_view("change_requests.approve")(notifications.approve_change_request)),
url(r'^(?P<project_id>\d+)/changerequest/reject$', record_view("change_requests.reject")(notifications.reject_change_request)),
]
# Regions of interest
urlpatterns += [
url(rf'^(?P<project_id>{integer})/roi/(?P<roi_id>{integer})/info$', roi.get_roi_info, name='get_roi_info'),
url(rf'^(?P<project_id>{integer})/roi/link/(?P<relation_id>{integer})/stack/(?P<stack_id>{integer})/ci/(?P<ci_id>{integer})/$',
record_view("rois.create_link")(roi.link_roi_to_class_instance), name='link_roi_to_class_instance'),
url(rf'^(?P<project_id>{integer})/roi/(?P<roi_id>{integer})/remove$', record_view("rois.remove_link")(roi.remove_roi_link), name='remove_roi_link'),
url(rf'^(?P<project_id>{integer})/roi/(?P<roi_id>{integer})/image$', roi.get_roi_image, name='get_roi_image'),
url(rf'^(?P<project_id>{integer})/roi/add$', record_view("rois.create")(roi.add_roi), name='add_roi'),
]
# General points
urlpatterns += [
url(rf'^(?P<project_id>{integer})/points/$', point.PointList.as_view()),
url(rf'^(?P<project_id>{integer})/points/(?P<point_id>[0-9]+)/$', point.PointDetail.as_view()),
]
# Landmarks
urlpatterns += [
url(rf'^(?P<project_id>{integer})/landmarks/$', landmarks.LandmarkList.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/$', landmarks.LandmarkDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/locations/$',
landmarks.LandmarkLocationList.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/locations/(?P<location_id>[0-9]+)/$',
landmarks.LandmarkLocationDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/(?P<landmark_id>[0-9]+)/groups/(?P<group_id>[0-9]+)/$',
landmarks.LandmarkAndGroupkLocationDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/$', landmarks.LandmarkGroupList.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/import$', landmarks.LandmarkGroupImport.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/materialize$', landmarks.LandmarkGroupMaterializer.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/links/$', landmarks.LandmarkGroupLinks.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/links/(?P<link_id>[0-9]+)/$',
landmarks.LandmarkGroupLinkDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/(?P<landmarkgroup_id>[0-9]+)/$', landmarks.LandmarkGroupDetail.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/(?P<landmarkgroup_id>[0-9]+)/transitively-linked$',
landmarks.LandmarkGroupLinkage.as_view()),
url(rf'^(?P<project_id>{integer})/landmarks/groups/(?P<landmarkgroup_id>[0-9]+)/locations/(?P<location_id>[0-9]+)/$',
landmarks.LandmarkGroupLocationList.as_view()),
]
# Clustering
urlpatterns += [
url(r'^clustering/(?P<workspace_pid>\d+)/setup$',
record_view("clusterings.setup_env")(clustering.setup_clustering), name='clustering_setup'),
url(r'^clustering/(?P<workspace_pid>\d+)/show$',
TemplateView.as_view(template_name="catmaid/clustering/display.html"),
name="clustering_display"),
]
# Volumes
urlpatterns += [
url(r'^(?P<project_id>\d+)/volumes/$', volume.volume_collection),
url(r'^(?P<project_id>\d+)/volumes/add$', record_view("volumes.create")(volume.add_volume)),
url(r'^(?P<project_id>\d+)/volumes/import$', record_view("volumes.create")(volume.import_volumes)),
url(r'^(?P<project_id>\d+)/volumes/entities/$', volume.get_volume_entities),
url(r'^(?P<project_id>\d+)/volumes/skeleton-innervations$', volume.get_skeleton_innervations),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/$', volume.VolumeDetail.as_view()),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/intersect$', volume.intersects),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/export\.(?P<extension>\w+)', volume.export_volume),
url(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/update-meta-info$', volume.update_meta_information),
]
# Analytics
urlpatterns += [
url(r'^(?P<project_id>\d+)/analytics/skeletons$', analytics.analyze_skeletons),
url(r'^(?P<project_id>\d+)/analytics/broken-section-nodes$', analytics.list_broken_section_nodes)
]
# Front-end tests, disabled by default
if settings.FRONT_END_TESTS_ENABLED:
urlpatterns += [
url(r'^tests$', login_required(CatmaidView.as_view(template_name="catmaid/tests.html")), name="frontend_tests"),
]
# Collection of various parts of the CATMAID API. These methods are usually
# one- or two-liners and having them in a separate statement would not improve
# readability. Therefore, they are all declared in this general statement.
urlpatterns += [
# User analytics and proficiency
url(r'^(?P<project_id>\d+)/useranalytics$', useranalytics.plot_useranalytics),
url(r'^(?P<project_id>\d+)/userproficiency$', user_evaluation.evaluate_user),
url(r'^(?P<project_id>\d+)/graphexport/json$', graphexport.export_jsongraph),
# Graphs
url(r'^(?P<project_id>\d+)/skeletons/confidence-compartment-subgraph', graph2.skeleton_graph),
# Circles
url(r'^(?P<project_id>\d+)/graph/circlesofhell', circles.circles_of_hell),
url(r'^(?P<project_id>\d+)/graph/directedpaths', circles.find_directed_paths),
url(r'^(?P<project_id>\d+)/graph/dps', circles.find_directed_path_skeletons),
# Review
url(r'^(?P<project_id>\d+)/user/reviewer-whitelist$', review.reviewer_whitelist),
# Search
url(r'^(?P<project_id>\d+)/search$', search.search),
# Wiring diagram export
url(r'^(?P<project_id>\d+)/wiringdiagram/json$', wiringdiagram.export_wiring_diagram),
url(r'^(?P<project_id>\d+)/wiringdiagram/nx_json$', wiringdiagram.export_wiring_diagram_nx),
# Annotation graph export
url(r'^(?P<project_id>\d+)/annotationdiagram/nx_json$', object.convert_annotations_to_networkx),
]
# Patterns for Janelia render web service access
from catmaid.control.janelia_render import (
project as janelia_render_project,
review as janelia_render_review,
stack as janelia_render_stack)
urlpatterns += [
url(r'^janelia-render/projects/$', janelia_render_project.projects),
url(r'^(?P<project_id>.+)/user/reviewer-whitelist$', janelia_render_review.reviewer_whitelist),
url(r'^(?P<project_id>.+)/interpolatable-sections/$', noop.interpolatable_sections),
url(r'^janelia-render/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', janelia_render_stack.stack_info),
url(r'^janelia-render/(?P<project_id>.+)/stacks$', janelia_render_stack.stacks),
url(r'^janelia-render/(?P<project_id>.+)/annotations/$', noop.list_annotations),
url(r'^janelia-render/(?P<project_id>.+)/annotations/query-targets$', noop.query_annotation_targets),
url(r'^janelia-render/client/datastores/(?P<name>[\w-]+)/$', noop.datastore_settings),
]
# Patterns for DVID access
from catmaid.control.dvid import (project as dvidproject,
review as dvidreview, stack as dvidstack)
urlpatterns += [
url(r'^dvid/projects/$', dvidproject.projects),
url(r'^(?P<project_id>.+)/user/reviewer-whitelist$', dvidreview.reviewer_whitelist),
url(r'^(?P<project_id>.+)/interpolatable-sections/$', noop.interpolatable_sections),
url(r'^dvid/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', dvidstack.stack_info),
url(r'^dvid/(?P<project_id>.+)/stacks$', dvidstack.stacks),
url(r'^dvid/(?P<project_id>.+)/annotations/$', noop.list_annotations),
url(r'^dvid/(?P<project_id>.+)/annotations/query-targets$', noop.query_annotation_targets),
url(r'^dvid/client/datastores/(?P<name>[\w-]+)/$', noop.datastore_settings),
]
|
tomka/CATMAID
|
django/applications/catmaid/urls.py
|
Python
|
gpl-3.0
| 38,907
|
[
"NEURON"
] |
f6764a5093cf19fb54a6d5c4195d1a92cd6d6b4ad11d766fde0e1e8952f5f5c1
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
##
""" Test for lib/cardinals_ptbr module. """
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.lib.cardinals.pt import to_words, to_words_as_money
class TestParameter(DomainTest):
def test_to_words(self):
self.assertEqual(to_words(0), "zero")
self.assertEqual(to_words(00), "zero")
self.assertEqual(to_words(000), "zero")
self.assertEqual(to_words(0000), "zero")
self.assertEqual(to_words(2), "dois")
self.assertEqual(to_words(2, feminine=1), "duas")
self.assertEqual(to_words(3), u"três")
self.assertEqual(to_words(10), "dez")
self.assertEqual(to_words(11), "onze")
self.assertEqual(to_words(15), "quinze")
self.assertEqual(to_words(20), "vinte")
self.assertEqual(to_words(25), "vinte e cinco")
self.assertEqual(to_words(49), "quarenta e nove")
self.assertEqual(to_words(100), "cem")
self.assertEqual(to_words(101), "cento e um")
self.assertEqual(to_words(116), "cento e dezesseis")
self.assertEqual(to_words(136), "cento e trinta e seis")
self.assertEqual(to_words(125), "cento e vinte e cinco")
self.assertEqual(to_words(225), "duzentos e vinte e cinco")
self.assertEqual(to_words(201), "duzentos e um")
self.assertEqual(to_words(202), "duzentos e dois")
self.assertEqual(to_words(202, feminine=1), "duzentas e duas")
self.assertEqual(to_words(212, feminine=1), "duzentas e doze")
self.assertEqual(to_words(1000), "um mil")
self.assertEqual(to_words(2000), "dois mil")
self.assertEqual(to_words(8000), "oito mil")
self.assertEqual(to_words(8001), "oito mil e um")
self.assertEqual(to_words(8101), "oito mil cento e um")
self.assertEqual(to_words(8301), "oito mil trezentos e um")
self.assertEqual(to_words(8501), "oito mil quinhentos e um")
self.assertEqual(to_words(8511), "oito mil quinhentos e onze")
self.assertEqual(to_words(7641),
"sete mil seiscentos e quarenta e um")
self.assertEqual(to_words(8600), "oito mil e seiscentos")
self.assertEqual(to_words(10000), "dez mil")
self.assertEqual(to_words(100000), "cem mil")
self.assertEqual(to_words(1000000), u"um milhão")
self.assertEqual(to_words(2000000), u"dois milhões")
self.assertEqual(to_words(2000100), u"dois milhões e cem")
self.assertEqual(to_words(2000111), u"dois milhões cento e onze")
self.assertEqual(to_words(2000102), u"dois milhões cento e dois")
self.assertEqual(to_words(2000102, feminine=1),
u"dois milhões cento e duas")
self.assertEqual(to_words(10000111), u"dez milhões cento e onze")
self.assertEqual(to_words(10000118),
u"dez milhões cento e dezoito")
self.assertEqual(to_words(100000111),
u"cem milhões cento e onze")
self.assertEqual(to_words(100010111),
u"cem milhões, dez mil cento e onze")
names = ['metro', 'metros']
self.assertEqual(to_words(1, unit_names=names), "um metro")
self.assertEqual(to_words(2, unit_names=names), "dois metros")
self.assertEqual(to_words(100, unit_names=names), "cem metros")
self.assertEqual(to_words(101, unit_names=names),
"cento e um metros")
self.assertEqual(to_words(2202, unit_names=names),
"dois mil duzentos e dois metros")
self.assertEqual(to_words(1000009, unit_names=names),
u"um milhão e nove metros")
def test_to_words_as_money(self):
names = ['real', 'reais', 'centavo', 'centavos']
self.assertEqual(to_words_as_money(1, names), "um real")
self.assertEqual(to_words_as_money(0.01, names), "um centavo")
self.assertEqual(to_words_as_money(0.25, names),
"vinte e cinco centavos")
self.assertEqual(to_words_as_money(100.02, names),
"cem reais e dois centavos")
self.assertEqual(to_words_as_money(100.20, names),
"cem reais e vinte centavos")
self.assertEqual(to_words_as_money(100.31, names),
"cem reais e trinta e um centavos")
self.assertEqual(to_words_as_money(100.01, names),
"cem reais e um centavo")
self.assertEqual(to_words_as_money(100.91, names),
"cem reais e noventa e um centavos")
|
andrebellafronte/stoq
|
stoqlib/lib/test/test_cardinals_pt.py
|
Python
|
gpl-2.0
| 5,510
|
[
"VisIt"
] |
d37bb3f028f7c885dd4213e9bd257b47017209ce448d8639b67d16c81a626056
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from shutil import which
from pymatgen.command_line.enumlib_caller import EnumError, EnumlibAdaptor
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.site_transformations import RemoveSitesTransformation
from pymatgen.transformations.standard_transformations import SubstitutionTransformation
from pymatgen.util.testing import PymatgenTest
enum_cmd = which("enum.x") or which("multienum.x")
makestr_cmd = which("makestr.x") or which("makeStr.x") or which("makeStr.py")
enumlib_present = enum_cmd and makestr_cmd
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumlibAdaptorTest(PymatgenTest):
_multiprocess_shared_ = True
def test_init(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
struct = self.get_structure("LiFePO4")
subtrans = SubstitutionTransformation({"Li": {"Li": 0.5}})
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 2)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 86)
for s in structures:
self.assertAlmostEqual(s.composition.get_atomic_fraction(Element("Li")), 0.5 / 6.5)
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 2, refine_structure=True)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 52)
subtrans = SubstitutionTransformation({"Li": {"Li": 0.25}})
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 1, refine_structure=True)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 1)
for s in structures:
self.assertAlmostEqual(s.composition.get_atomic_fraction(Element("Li")), 0.25 / 6.25)
# Make sure it works for completely disordered structures.
struct = Structure([[10, 0, 0], [0, 10, 0], [0, 0, 10]], [{"Fe": 0.5}], [[0, 0, 0]])
adaptor = EnumlibAdaptor(struct, 1, 2)
adaptor.run()
self.assertEqual(len(adaptor.structures), 3)
# Make sure it works properly when symmetry is broken by ordered sites.
struct = self.get_structure("LiFePO4")
subtrans = SubstitutionTransformation({"Li": {"Li": 0.25}})
s = subtrans.apply_transformation(struct)
# Remove some ordered sites to break symmetry.
removetrans = RemoveSitesTransformation([4, 7])
s = removetrans.apply_transformation(s)
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 4)
struct = Structure(
[[3, 0, 0], [0, 3, 0], [0, 0, 3]],
[{"Si": 0.5}] * 2,
[[0, 0, 0], [0.5, 0.5, 0.5]],
)
adaptor = EnumlibAdaptor(struct, 1, 3, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 10)
struct = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "EnumerateTest.json"))
adaptor = EnumlibAdaptor(struct, 1, 1)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 2)
def test_rounding_errors(self):
# It used to be that a rounding issue would result in this structure
# showing that Cu3Te2 satisfies an ordering of this structure.
# This has been fixed by multiplying the base by 100.
struct = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Cu7Te5.cif"))
adaptor = EnumlibAdaptor(struct, 1, 2)
self.assertRaises(EnumError, adaptor.run)
adaptor = EnumlibAdaptor(struct, 1, 5)
adaptor.run()
self.assertEqual(len(adaptor.structures), 197)
def test_partial_disorder(self):
s = Structure.from_file(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "garnet.cif"))
a = SpacegroupAnalyzer(s, 0.1)
prim = a.find_primitive()
s = prim.copy()
s["Al3+"] = {"Al3+": 0.5, "Ga3+": 0.5}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 7)
for s in structures:
self.assertEqual(s.formula, "Ca12 Al4 Ga4 Si12 O48")
s = prim.copy()
s["Ca2+"] = {"Ca2+": 1 / 3, "Mg2+": 2 / 3}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 20)
for s in structures:
self.assertEqual(s.formula, "Ca4 Mg8 Al8 Si12 O48")
s = prim.copy()
s["Si4+"] = {"Si4+": 1 / 3, "Ge4+": 2 / 3}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 18)
for s in structures:
self.assertEqual(s.formula, "Ca12 Al8 Si4 Ge8 O48")
@unittest.skip("Fails seemingly at random.")
def test_timeout(self):
s = Structure.from_file(filename=os.path.join(PymatgenTest.TEST_FILES_DIR, "garnet.cif"))
SpacegroupAnalyzer(s, 0.1)
s["Al3+"] = {"Al3+": 0.5, "Ga3+": 0.5}
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01, timeout=0.0000000000001)
self.assertRaises(TimeoutError, adaptor._run_multienum)
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen
|
pymatgen/command_line/tests/test_enumlib_caller.py
|
Python
|
mit
| 6,019
|
[
"pymatgen"
] |
79143b2bdf720fde7182a6c489fe88d92f28cdf98a6b4e1a187d4db754fd7194
|
from __future__ import absolute_import
from __future__ import print_function
import operator
import itertools
from .base import NodeTransformer
from .. import nodes, errors, datatypes, functions
ifilter = itertools.ifilter
#==============================================================================#
class ScopeContext(object):
def __init__(self, solver):
self.solver = solver
def __enter__(self):
self.solver.push_scope()
return self
def __exit__(self, exc_type, exc_value, tb):
self.solver.pop_scope()
class NamespaceContext(object):
def __init__(self, solver):
self.solver = solver
def __enter__(self):
self.solver.push_namespace()
return self
def __exit__(self, exc_type, exc_value, tb):
self.solver.pop_namespace()
class Namespace(object):
def __init__(self):
self.scopes = [{}]
def push_scope(self):
self.scopes.append({})
def pop_scope(self):
self.scopes.pop()
def merge_child_namespace(self, child):
assert isinstance(child, Namespace)
assert len(child.scopes) == 1
for name, value in child.scopes[0].iteritems():
if name not in self.scopes[-1]:
self.scopes[-1][name] = value
def __getitem__(self, name):
for scope in self.scopes[::-1]:
try:
return scope[name]
except KeyError:
pass
# TODO: should this raise an exception or return a sentinel value?
raise KeyError('name not found')
def __setitem__(self, name, value):
self.scopes[-1][name] = value
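# Illustrative sketch (not part of the original source): name lookup in
# Namespace walks the scopes from innermost to outermost, so an assignment
# in an inner scope shadows an outer binding until that scope is popped.
#
#   ns = Namespace()
#   ns['color'] = 'red'
#   ns.push_scope()
#   ns['color'] = 'blue'
#   ns['color']      # -> 'blue'  (innermost scope wins)
#   ns.pop_scope()
#   ns['color']      # -> 'red'   (outer binding is visible again)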
#==============================================================================#
class Solver(NodeTransformer):
def __init__(self, options=None):
self.namespaces = []
self.context = None
def namespace(self):
return NamespaceContext(self)
def push_namespace(self):
self.namespaces.append(Namespace())
def pop_namespace(self):
if len(self.namespaces) > 1:
child = self.namespaces.pop()
            self.namespaces[-1].merge_child_namespace(child)
else:
self.namespaces.pop()
def scope(self):
return ScopeContext(self)
def push_scope(self):
self.namespaces[-1].push_scope()
def pop_scope(self):
self.namespaces[-1].pop_scope()
def assign_variable(self, name, value):
self.namespaces[-1][name] = value
def retrieve_variable(self, name):
return self.namespaces[-1][name]
def node_as_value(self, node):
assert not isinstance(node, (list, tuple))
if isinstance(node, nodes.Node):
return node.to_value()
return node
def value_as_node(self, value):
assert not isinstance(value, (list, tuple))
if not isinstance(value, nodes.Node):
if value.is_negative():
node = nodes.CSSValueNode.node_from_value(-value)
return nodes.UnaryOpExpr(op=nodes.UMinus(), operand=node)
else:
return nodes.CSSValueNode.node_from_value(value)
return value
#==========================================================================#
def __call__(self, node):
return self.visit(node)
def visit_Stylesheet(self, node):
# new namespace
with self.namespace():
node.statements = list(ifilter(bool, (self.visit(stmt) for stmt in node.statements)))
return node
def visit_ImportedStylesheet(self, node):
# new namespace
with self.namespace():
node.statements = list(ifilter(bool, (self.visit(stmt) for stmt in node.statements)))
return node
def visit_RuleSet(self, node):
# new scope
with self.scope():
node.selectors = list(ifilter(bool, (self.visit(sel) for sel in node.selectors)))
node.statements = list(ifilter(bool, (self.visit(stmt) for stmt in node.statements)))
return node
def visit_Declaration(self, node):
node.property = self.visit(node.property)
#TODO: set lineno on value node if it doesn't have one
node.expr = self.value_as_node(self.visit(node.expr))
return node
def visit_VarDef(self, node):
# solve Expr; Assign to variable.
# The VarDef node is removed from the syntax tree.
name = node.name
#TODO: set lineno on value node if it doesn't have one
value = self.value_as_node(self.visit(node.expr))
self.assign_variable(name, value)
return None
def visit_VarName(self, node):
# TODO: Replace VarRef with appropriate node.
# But, for that we need to know context. e.g. in a selector, a hash becomes an IdSelector -- in a declaration, a hash becomes a HexColor.
# TODO: handle unknown name errors
try:
return self.retrieve_variable(node.name)
except KeyError:
raise errors.CSSVarNameError()
def visit_UnaryOpExpr(self, node):
# TODO: handle TypeError 'bad operand type for ...' exceptions
operand = self.visit(node.operand)
if isinstance(node.op, nodes.UMinus):
return operator.neg(self.node_as_value(operand))
elif isinstance(node.op, nodes.UPlus):
return operator.pos(self.node_as_value(operand))
else:
raise RuntimeError() # pragma: no cover
_binop_map = {
nodes.AddOp(): operator.add,
nodes.MultOp(): operator.mul,
nodes.SubtractOp(): operator.sub,
nodes.DivisionOp(): operator.truediv,
}
def visit_BinaryOpExpr(self, node):
# TODO: handle TypeError 'unsupported operand type ...' exceptions
lhs = self.visit(node.lhs)
rhs = self.visit(node.rhs)
if isinstance(node.op, nodes.FwdSlashOp):
return node
try:
return self._binop_map[node.op](self.node_as_value(lhs), self.node_as_value(rhs))
except KeyError:
raise RuntimeError() # pragma: no cover
def visit_NaryOpExpr(self, node):
node.operands = list(ifilter(bool, (self.visit(operand) for operand in node.operands)))
return node
def call_function(self, name, operands):
# 1. check that function exists
# - function doesn't exist - not an error
try:
func = functions.get_function(name, len(operands))
except errors.CSSFunctionNotFound:
return None
# 2. convert operands to values
# 2.a. handle error on conversion
# - no to_value() method - not an error
# - error raised by to_value() - (probably an error--just like if to_value fails elsewhere)
# - If error is an error of conversion, not an error
# - If error is an error during calculations (for instance, if an
# arg involves addition of two terms whose types do not support
# addition), it is an error?
try:
args = tuple(self.node_as_value(x) for x in operands)
except Exception:
return None
# 3. call function
# 3.a. handle error raised by calling function
# - bad argument types - not an error?--perhaps an option?
try:
return func(*args)
except Exception:
return None
def visit_FunctionExpr(self, node):
# TODO: certain functions are handled specially:
# rgb(), hsl(), rgba(), hsla() become Colors
node.expr = self.visit(node.expr)
r = self.call_function(node.name, node.expr.operands)
if r is not None:
return r
return node
#==============================================================================#
|
colossalbit/cssypy
|
cssypy/visitors/solvers.py
|
Python
|
bsd-3-clause
| 8,124
|
[
"VisIt"
] |
47b679d2ae4b6c9f0a45afa0c746fb49e7aa3a46c32fffd0dd0a0219a5cb67c7
|
"""
Converting between nefis and netcdf
In a separate module to separate dependencies
Relies on the qnc wrapper of netcdf4
"""
from collections import defaultdict
from ...io import qnc
def nefis_to_nc(nef,squeeze_unl=True,squeeze_element=True,
short_if_unique=True,to_lower=True,unl_name='time',
element_map={},nc_kwargs={},nc=None):
"""
nef: an open Nefis object
squeeze_unl: unit length unlimited dimensions in groups are dropped
groups can't be dimensionless, so parameter values are often in
a unit-length group.
short_if_unique: element names which appear in only one group get
just the element name. others are group_element
to_lower: make names lower case
squeeze_element: unit dimensions in elements are also removed
unl_name: if there is a unique unlimited dimension length (ignoring
unit lengths if squeeze_unl is set) - use this name for it.
element_map: map original element names to new names. this matches
against element names before to_lower (but after strip), and the results
will *not* be subject to to_lower.
nc_kwargs: dict of argument to pass to qnc.empty
    nc: alternatively, an already open QDataset
"""
if nc is None:
nc=qnc.empty(**nc_kwargs)
# required for writing to disk
nc._set_string_mode('fixed')
# string handling is a little funky -
# still working out whether it should be varlen or fixed.
# fixed makes the string length a new dimension, just annoying
# to get strings back from that.
# varlen ends up having an object dtype, which may not write out
# well with netcdf.
# check for unique element names
name_count=defaultdict(lambda: 0)
for group in nef.groups():
for elt_name in group.cell.element_names:
name_count[elt_name]+=1
# check for unique unlimited dimension:
n_unl=0
for group in nef.groups():
if 0 in group.shape and (group.unl_length()>1 or not squeeze_unl):
n_unl+=1
for group in nef.groups():
# print group.name
g_shape=group.shape
grp_slices=[slice(None)]*len(g_shape)
grp_dim_names=[None]*len(g_shape)
        if 0 in g_shape: # has an unlimited dimension
idx=list(g_shape).index(0)
if group.unl_length()==1 and squeeze_unl: # which will be squeezed
grp_slices[idx]=0
elif n_unl==1 and unl_name: # which will be named
grp_dim_names[idx]=unl_name
for elt_name in group.cell.element_names:
# print elt_name
if name_count[elt_name]==1 and short_if_unique:
vname=elt_name
else:
vname=group.name + "_" + elt_name
if vname in element_map:
vname=element_map[vname]
elif to_lower:
vname=vname.lower()
value=group.getelt(elt_name)
# apply slices
value=value[tuple(grp_slices)]
if squeeze_element:
# slices specific to this element
val_slices=[slice(None)]*len(value.shape)
# iterate over just the element portion of the shape
for idx in range(len(g_shape),len(val_slices)):
if value.shape[idx]==1:
val_slices[idx]=0
value=value[val_slices]
# mimics qnc naming.
names=[qnc.anon_dim_name(size=l) for l in value.shape]
for idx,name in enumerate(grp_dim_names):
if name:
names[idx]=name
names.append(Ellipsis)
nc[vname][names] = value
setattr(nc.variables[vname],'group_name',group.name)
return nc
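# Illustrative usage sketch (not part of the module). `nef` stands for an
# already-open Nefis object from the surrounding package, and the
# element_map entry is an invented rename shown only to make the parameter
# descriptions above concrete.
#
#   nc = nefis_to_nc(nef,
#                    squeeze_unl=True,        # drop unit-length record dims
#                    short_if_unique=True,    # bare element names when unique
#                    unl_name='time',         # name for the shared record dim
#                    element_map={'ZWL': 'water_level'})  # hypothetical rename
#   # ... work with the resulting QDataset, then close it when done
#   # (assuming it exposes the usual netCDF-style close()).
#   nc.close()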
|
rustychris/stompy
|
stompy/model/delft/nefis_nc.py
|
Python
|
mit
| 3,795
|
[
"NetCDF"
] |
ca4bf356cdfe1ba5a87766176d7c5b5e0b862305c6c6fb2e64ca7ee0954db180
|
# TODO implement references as k-folds
import numpy as np
from scipy import ndimage
import scipy.misc as sp
import h5py
def flipz(h_arr):
""" Flip heart array along z axis """
return h_arr[:,:,:,::-1,:]
def flipy(h_arr):
""" Flip heart array along y axis """
return h_arr[:,:,::-1,:,:]
def gblur(h_arr, sig=0.5):
""" Gaussian blur scan """
xaxis = h_arr.shape[1]* 2**3
twod_heart = h_arr.reshape(-1, xaxis)
blurred_heart = ndimage.gaussian_filter(twod_heart, sig)
return np.reshape(blurred_heart, h_arr.shape)
def rotate(h_arr):
""" Rotate around circular axis """
r90 = np.rot90(h_arr, 1, axes=(2,3))
r180 = np.rot90(h_arr, 2, axes=(2,3))
r270 = np.rot90(h_arr, 3, axes=(2,3))
return np.concatenate([r90,r180,r270])
def translate(h_arr):
""" Translate along y axis """
trans_arr = np.roll(h_arr, 2, axis=2)
for i in np.arange(4,np.shape(h_arr)[1],2):
trans_arr = np.append(trans_arr, np.roll(h_arr, i, axis=2), axis=0)
return trans_arr
def augment(h_arr):
""" AUGMENT BOI """
print("Start shape: "+str(h_arr.shape))
h_arr = np.concatenate((h_arr, flipz(h_arr))) # 2*n = 2n
print("After flip z: "+str(h_arr.shape))
h_arr = np.concatenate((h_arr, flipy(h_arr))) # 2*2n = 4n
print("After flip y: "+str(h_arr.shape))
h_arr = np.concatenate((h_arr, flipz(flipy(h_arr)))) # 2*4n = 8n
print("After flip z y: "+str(h_arr.shape))
h_arr = np.concatenate((h_arr, gblur(h_arr))) # 2*8n = 16n
print("After g blur: "+str(h_arr.shape))
h_arr = np.concatenate((h_arr, rotate(h_arr))) # 4*16n = 64n
print("After rotation: "+str(h_arr.shape))
h_arr = np.concatenate((h_arr, translate(h_arr))) # 16*64n = 1024n
print("After translation: "+str(h_arr.shape))
return h_arr
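# Illustrative smoke test (not part of the original script): the growth
# factors in the comments above depend on the cross-sectional size
# (h_arr.shape[1]) through translate(), so the quickest way to see the real
# multiplier for a given scan geometry is to augment a small dummy volume
# and compare first-axis lengths. The shape below is an arbitrary example.
#
#   dummy = np.random.rand(1, 8, 8, 8, 1)
#   print(augment(dummy).shape[0], "augmented copies per input scan")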
if __name__ == "__main__":
hf_real_data = h5py.File("./data/real_data.h5")
examples = hf_real_data["in_data"]
labelsOH = hf_real_data["in_labels"]
aug_arr = np.expand_dims(augment(np.expand_dims(examples[0],0)), 0)
print("Total array shape: "+str(aug_arr.shape)+"\n")
for heart in examples[1:]:
heart = np.expand_dims(heart, 0)
aug_arr = np.append(aug_arr, np.expand_dims(augment(heart), axis=0), axis=0)
print("Total array shape: "+str(aug_arr.shape)+"\n")
aug_indices = np.repeat(np.arange(examples.shape[0]), aug_arr.shape[1])
labelsOH = np.repeat(labelsOH, aug_arr.shape[1], axis=0)
aug_arr = np.reshape(aug_arr, [-1,aug_arr.shape[2],aug_arr.shape[3],aug_arr.shape[4],aug_arr.shape[5]])
print("Total array shape:"+str(aug_arr.shape)+"\n")
print("Total label shape:"+str(labelsOH.shape)+"\n")
with h5py.File("./data/aug_data.h5") as hf:
hf.create_dataset("in_data", data=aug_arr)
hf.create_dataset("in_labels", data=labelsOH)
hf.create_dataset("indices", data=aug_indices)
|
Smith42/heart-cnn
|
augmentor.py
|
Python
|
gpl-3.0
| 2,876
|
[
"Gaussian"
] |
53e1b09d16faed0bcb0804782223ac0072c69f0bb6339f700b89c6c4227b2f13
|
# Copyright 2000-2003 Jeff Chang.
# Copyright 2001-2008 Brad Chapman.
# Copyright 2005-2010 by Peter Cock.
# Copyright 2006-2009 Michiel de Hoon.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Represent a Sequence Feature holding info about a part of a sequence.
This is heavily modeled after the Biocorba SeqFeature objects, and
may be pretty biased towards GenBank stuff since I'm writing it
for the GenBank parser output...
What's here:
Base class to hold a Feature.
----------------------------
classes:
o SeqFeature
Hold information about a Reference.
----------------------------------
This is an attempt to create a General class to hold Reference type
information.
classes:
o Reference
Specify locations of a feature on a Sequence.
---------------------------------------------
This aims to handle, in Ewan's words, 'the dreaded fuzziness issue' in
much the same way as Biocorba. This has the advantages of allowing us
to handle fuzzy stuff in case anyone needs it, and also be compatible
with Biocorba.
classes:
o FeatureLocation - Specify the start and end location of a feature.
o ExactPosition - Specify the position as being exact.
o WithinPosition - Specify a position occurring within some range.
o BetweenPosition - Specify a position occurring between two positions (OBSOLETE?).
o BeforePosition - Specify the position as being found before some base.
o AfterPosition - Specify the position as being found after some base.
o OneOfPosition - Specify a position where the location can be multiple positions.
"""
from Bio.Seq import MutableSeq, reverse_complement
class SeqFeature(object):
"""Represent a Sequence Feature on an object.
Attributes:
o location - the location of the feature on the sequence (FeatureLocation)
o type - the specified type of the feature (ie. CDS, exon, repeat...)
o location_operator - a string specifying how this SeqFeature may
be related to others. For example, in the example GenBank feature
shown below, the location_operator would be "join"
o strand - A value specifying on which strand (of a DNA sequence, for
instance) the feature deals with. 1 indicates the plus strand, -1
indicates the minus strand, 0 indicates stranded but unknown (? in GFF3),
while the default of None indicates that strand doesn't apply (dot in GFF3,
e.g. features on proteins)
o id - A string identifier for the feature.
o ref - A reference to another sequence. This could be an accession
number for some different sequence.
o ref_db - A different database for the reference accession number.
o qualifiers - A dictionary of qualifiers on the feature. These are
    analogous to the qualifiers from a GenBank feature table. The keys of
the dictionary are qualifier names, the values are the qualifier
values.
o sub_features - Additional SeqFeatures which fall under this 'parent'
    feature. For instance, if we have something like:
CDS join(1..10,30..40,50..60)
Then the top level feature would be of type 'CDS' from 1 to 60 (actually 0
to 60 in Python counting) with location_operator='join', and the three sub-
features would also be of type 'CDS', and would be from 1 to 10, 30 to
40 and 50 to 60, respectively (although actually using Python counting).
To get the nucleotide sequence for this CDS, you would need to take the
parent sequence and do seq[0:10]+seq[29:40]+seq[49:60] (Python counting).
Things are more complicated with strands and fuzzy positions. To save you
dealing with all these special cases, the SeqFeature provides an extract
method to do this for you.
"""
def __init__(self, location = None, type = '', location_operator = '',
strand = None, id = "<unknown id>",
qualifiers = None, sub_features = None,
ref = None, ref_db = None):
"""Initialize a SeqFeature on a Sequence.
location can either be a FeatureLocation (with strand argument also
given if required), or None.
e.g. With no strand, on the forward strand, and on the reverse strand:
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> f1 = SeqFeature(FeatureLocation(5,10), type="domain")
>>> f2 = SeqFeature(FeatureLocation(7,110), strand=1, type="CDS")
>>> f3 = SeqFeature(FeatureLocation(9,108), strand=-1, type="CDS")
An invalid strand will trigger an exception:
>>> f4 = SeqFeature(FeatureLocation(50,60), strand=2)
Traceback (most recent call last):
...
ValueError: Strand should be +1, -1, 0 or None, not 2
For exact start/end positions, an integer can be used (as shown above)
as shorthand for the ExactPosition object. For non-exact locations, the
FeatureLocation must be specified via the appropriate position objects.
"""
if strand not in [-1, 0, 1, None] :
raise ValueError("Strand should be +1, -1, 0 or None, not %s" \
% repr(strand))
if location is not None and not isinstance(location, FeatureLocation):
raise TypeError("FeatureLocation (or None) required for the location")
self.location = location
self.type = type
self.location_operator = location_operator
self.strand = strand
self.id = id
if qualifiers is None:
qualifiers = {}
self.qualifiers = qualifiers
if sub_features is None:
sub_features = []
self.sub_features = sub_features
self.ref = ref
self.ref_db = ref_db
def __repr__(self):
"""A string representation of the record for debugging."""
answer = "%s(%s" % (self.__class__.__name__, repr(self.location))
if self.type:
answer += ", type=%s" % repr(self.type)
if self.location_operator:
answer += ", location_operator=%s" % repr(self.location_operator)
if self.strand is not None:
answer += ", strand=%s" % repr(self.strand)
if self.id and self.id != "<unknown id>":
answer += ", id=%s" % repr(self.id)
if self.ref:
answer += ", ref=%s" % repr(self.ref)
if self.ref_db:
answer += ", ref_db=%s" % repr(self.ref_db)
answer += ")"
return answer
def __str__(self):
"""A readable summary of the feature intended to be printed to screen.
"""
out = "type: %s\n" % self.type
out += "location: %s\n" % self.location
if self.id and self.id != "<unknown id>":
out += "id: %s\n" % self.id
if self.ref or self.ref_db:
out += "ref: %s:%s\n" % (self.ref, self.ref_db)
out += "strand: %s\n" % self.strand
out += "qualifiers: \n"
for qual_key in sorted(self.qualifiers):
out += " Key: %s, Value: %s\n" % (qual_key,
self.qualifiers[qual_key])
if len(self.sub_features) != 0:
out += "Sub-Features\n"
for sub_feature in self.sub_features:
out +="%s\n" % sub_feature
return out
def _shift(self, offset):
"""Returns a copy of the feature with its location shifted (PRIVATE).
        The annotation qualifiers are copied."""
return SeqFeature(location = self.location._shift(offset),
type = self.type,
location_operator = self.location_operator,
strand = self.strand,
id = self.id,
qualifiers = dict(self.qualifiers.iteritems()),
sub_features = [f._shift(offset) for f in self.sub_features],
ref = self.ref,
ref_db = self.ref_db)
def _flip(self, length):
"""Returns a copy of the feature with its location flipped (PRIVATE).
The argument length gives the length of the parent sequence. For
example a location 0..20 (+1 strand) with parent length 30 becomes
after flipping 10..30 (-1 strand). Strandless (None) or unknown
strand (0) remain like that - just their end points are changed.
        The annotation qualifiers are copied.
"""
if self.strand == +1 :
new_strand = -1
elif self.strand == -1 :
new_strand = +1
else:
            #When creating a new SeqFeature it will check this is 0 or None
new_strand = self.strand
return SeqFeature(location = self.location._flip(length),
type = self.type,
location_operator = self.location_operator,
strand = new_strand,
id = self.id,
qualifiers = dict(self.qualifiers.iteritems()),
sub_features = [f._flip(length) for f in self.sub_features[::-1]],
ref = self.ref,
ref_db = self.ref_db)
def extract(self, parent_sequence):
"""Extract feature sequence from the supplied parent sequence.
The parent_sequence can be a Seq like object or a string, and will
generally return an object of the same type. The exception to this is
a MutableSeq as the parent sequence will return a Seq object.
This should cope with complex locations including complements, joins
and fuzzy positions. Even mixed strand features should work! This
also covers features on protein sequences (e.g. domains), although
here reverse strand features are not permitted.
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_protein
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> seq = Seq("MKQHKAMIVALIVICITAVVAAL", generic_protein)
>>> f = SeqFeature(FeatureLocation(8,15), type="domain")
>>> f.extract(seq)
Seq('VALIVIC', ProteinAlphabet())
Note - currently only sub-features of type "join" are supported.
"""
if isinstance(parent_sequence, MutableSeq):
#This avoids complications with reverse complements
#(the MutableSeq reverse complement acts in situ)
parent_sequence = parent_sequence.toseq()
if self.sub_features:
if self.location_operator!="join":
raise ValueError(self.location_operator)
if self.strand == -1:
#This is a special case given how the GenBank parser works.
#Must avoid doing the reverse complement twice.
parts = []
for f_sub in self.sub_features:
assert f_sub.strand==-1
parts.append(parent_sequence[f_sub.location.nofuzzy_start:\
f_sub.location.nofuzzy_end])
else:
#This copes with mixed strand features:
parts = [f_sub.extract(parent_sequence) \
for f_sub in self.sub_features]
#We use addition rather than a join to avoid alphabet issues:
f_seq = parts[0]
for part in parts[1:] : f_seq += part
else:
f_seq = parent_sequence[self.location.nofuzzy_start:\
self.location.nofuzzy_end]
if self.strand == -1:
#TODO - MutableSeq?
try:
f_seq = f_seq.reverse_complement()
except AttributeError:
assert isinstance(f_seq, str)
f_seq = reverse_complement(f_seq)
return f_seq
def __nonzero__(self):
"""Returns True regardless of the length of the feature.
This behaviour is for backwards compatibility, since until the
__len__ method was added, a SeqFeature always evaluated as True.
Note that in comparison, Seq objects, strings, lists, etc, will all
evaluate to False if they have length zero.
WARNING: The SeqFeature may in future evaluate to False when its
length is zero (in order to better match normal python behaviour)!
"""
return True
def __len__(self):
"""Returns the length of the region described by a feature.
>>> from Bio.Seq import Seq
>>> from Bio.Alphabet import generic_protein
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> seq = Seq("MKQHKAMIVALIVICITAVVAAL", generic_protein)
>>> f = SeqFeature(FeatureLocation(8,15), type="domain")
>>> len(f)
7
>>> f.extract(seq)
Seq('VALIVIC', ProteinAlphabet())
>>> len(f.extract(seq))
7
For simple features without subfeatures this is the same as the region
spanned (end position minus start position). However, for a feature
defined by combining several subfeatures (e.g. a CDS as the join of
several exons) the gaps are not counted (e.g. introns). This ensures
that len(f) == len(f.extract(parent_seq)), and also makes sure things
work properly with features wrapping the origin etc.
"""
if self.sub_features:
return sum(len(f) for f in self.sub_features)
else:
return len(self.location)
def __iter__(self):
"""Iterate over the parent positions within the feature.
The iteration order is strand aware, and can be thought of as moving
along the feature using the parent sequence coordinates:
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> f = SeqFeature(FeatureLocation(5,10), type="domain", strand=-1)
>>> len(f)
5
>>> for i in f: print i
9
8
7
6
5
>>> list(f)
[9, 8, 7, 6, 5]
"""
if self.sub_features:
if self.strand == -1:
for f in self.sub_features[::-1]:
for i in f.location:
yield i
else:
for f in self.sub_features:
for i in f.location:
yield i
elif self.strand == -1:
for i in range(self.location.nofuzzy_end-1,
self.location.nofuzzy_start-1, -1):
yield i
else:
for i in range(self.location.nofuzzy_start,
self.location.nofuzzy_end):
yield i
def __contains__(self, value):
"""Check if an integer position is within the feature.
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> f = SeqFeature(FeatureLocation(5,10), type="domain", strand=-1)
>>> len(f)
5
>>> [i for i in range(15) if i in f]
[5, 6, 7, 8, 9]
For example, to see which features include a SNP position, you could
use this:
>>> from Bio import SeqIO
>>> record = SeqIO.read("GenBank/NC_000932.gb", "gb")
>>> for f in record.features:
... if 1750 in f:
... print f.type, f.strand, f.location
source 1 [0:154478]
gene -1 [1716:4347]
tRNA -1 [1716:4347]
Note that for a feature defined as a join of several subfeatures (e.g.
the union of several exons) the gaps are not checked (e.g. introns).
In this example, the tRNA location is defined in the GenBank file as
complement(join(1717..1751,4311..4347)), so that position 1760 falls
in the gap:
>>> for f in record.features:
... if 1760 in f:
... print f.type, f.strand, f.location
source 1 [0:154478]
gene -1 [1716:4347]
Note that additional care may be required with fuzzy locations, for
example just before a BeforePosition:
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> from Bio.SeqFeature import BeforePosition
>>> f = SeqFeature(FeatureLocation(BeforePosition(3),8), type="domain")
>>> len(f)
5
>>> [i for i in range(10) if i in f]
[3, 4, 5, 6, 7]
"""
if not isinstance(value, int):
raise ValueError("Currently we only support checking for integer "
"positions being within a SeqFeature.")
if self.sub_features:
for f in self.sub_features:
if value in f:
return True
return False
else:
return value in self.location
# --- References
# TODO -- Will this hold PubMed and Medline information decently?
class Reference(object):
"""Represent a Generic Reference object.
Attributes:
o location - A list of Location objects specifying regions of
the sequence that the references correspond to. If no locations are
specified, the entire sequence is assumed.
o authors - A big old string, or a list split by author, of authors
for the reference.
o title - The title of the reference.
o journal - Journal the reference was published in.
o medline_id - A medline reference for the article.
o pubmed_id - A pubmed reference for the article.
o comment - A place to stick any comments about the reference.
"""
def __init__(self):
self.location = []
self.authors = ''
self.consrtm = ''
self.title = ''
self.journal = ''
self.medline_id = ''
self.pubmed_id = ''
self.comment = ''
def __str__(self):
"""Output an informative string for debugging.
"""
out = ""
for single_location in self.location:
out += "location: %s\n" % single_location
out += "authors: %s\n" % self.authors
if self.consrtm:
out += "consrtm: %s\n" % self.consrtm
out += "title: %s\n" % self.title
out += "journal: %s\n" % self.journal
out += "medline id: %s\n" % self.medline_id
out += "pubmed id: %s\n" % self.pubmed_id
out += "comment: %s\n" % self.comment
return out
def __repr__(self):
        #TODO - Update this if __init__ later accepts values
return "%s(title=%s, ...)" % (self.__class__.__name__,
repr(self.title))
# --- Handling feature locations
class FeatureLocation(object):
"""Specify the location of a feature along a sequence.
This attempts to deal with fuzziness of position ends, but also
make it easy to get the start and end in the 'normal' case (no
fuzziness).
You should access the start and end attributes with
your_location.start and your_location.end. If the start and
end are exact, this will return the positions, if not, we'll return
    the appropriate Fuzzy class with info about the position and fuzziness.
Note that the start and end location numbering follow Python's scheme,
thus a GenBank entry of 123..150 (one based counting) becomes a location
of [122:150] (zero based counting).
"""
def __init__(self, start, end):
"""Specify the start and end of a sequence feature.
start and end arguments specify the values where the feature begins
        and ends. These can either be any of the *Position objects that
inherit from AbstractPosition, or can just be integers specifying the
position. In the case of integers, the values are assumed to be
        exact and are converted into ExactPosition arguments. This is meant
to make it easy to deal with non-fuzzy ends.
i.e. Short form:
>>> from Bio.SeqFeature import FeatureLocation
>>> loc = FeatureLocation(5,10)
Explicit form:
>>> from Bio.SeqFeature import FeatureLocation, ExactPosition
>>> loc = FeatureLocation(ExactPosition(5),ExactPosition(10))
Other fuzzy positions are used similarly,
>>> from Bio.SeqFeature import FeatureLocation
>>> from Bio.SeqFeature import BeforePosition, AfterPosition
>>> loc2 = FeatureLocation(BeforePosition(5),AfterPosition(10))
"""
if isinstance(start, AbstractPosition):
self._start = start
else:
self._start = ExactPosition(start)
if isinstance(end, AbstractPosition):
self._end = end
else:
self._end = ExactPosition(end)
def __str__(self):
"""Returns a representation of the location (with python counting).
For the simple case this uses the python splicing syntax, [122:150]
(zero based counting) which GenBank would call 123..150 (one based
counting).
"""
return "[%s:%s]" % (self._start, self._end)
def __repr__(self):
"""A string representation of the location for debugging."""
return "%s(%s,%s)" \
% (self.__class__.__name__, repr(self.start), repr(self.end))
def __nonzero__(self):
"""Returns True regardless of the length of the feature.
This behaviour is for backwards compatibility, since until the
__len__ method was added, a FeatureLocation always evaluated as True.
Note that in comparison, Seq objects, strings, lists, etc, will all
evaluate to False if they have length zero.
WARNING: The FeatureLocation may in future evaluate to False when its
length is zero (in order to better match normal python behaviour)!
"""
return True
def __len__(self):
"""Returns the length of the region described by the FeatureLocation.
Note that extra care may be needed for fuzzy locations, e.g.
>>> from Bio.SeqFeature import FeatureLocation
>>> from Bio.SeqFeature import BeforePosition, AfterPosition
>>> loc = FeatureLocation(BeforePosition(5),AfterPosition(10))
>>> len(loc)
5
"""
#TODO - Should we use nofuzzy_start and nofuzzy_end here?
return self._end.position + self._end.extension - self._start.position
def __contains__(self, value):
"""Check if an integer position is within the FeatureLocation.
Note that extra care may be needed for fuzzy locations, e.g.
>>> from Bio.SeqFeature import FeatureLocation
>>> from Bio.SeqFeature import BeforePosition, AfterPosition
>>> loc = FeatureLocation(BeforePosition(5),AfterPosition(10))
>>> len(loc)
5
>>> [i for i in range(15) if i in loc]
[5, 6, 7, 8, 9]
"""
if not isinstance(value, int):
raise ValueError("Currently we only support checking for integer "
"positions being within a FeatureLocation.")
#TODO - Should we use nofuzzy_start and nofuzzy_end here?
if value < self._start.position \
or value >= self._end.position + self._end.extension:
return False
else:
return True
def __iter__(self):
"""Iterate over the parent positions within the FeatureLocation.
>>> from Bio.SeqFeature import FeatureLocation
>>> from Bio.SeqFeature import BeforePosition, AfterPosition
>>> loc = FeatureLocation(BeforePosition(5),AfterPosition(10))
>>> len(loc)
5
>>> for i in loc: print i
5
6
7
8
9
>>> list(loc)
[5, 6, 7, 8, 9]
>>> [i for i in range(15) if i in loc]
[5, 6, 7, 8, 9]
"""
#TODO - Should we use nofuzzy_start and nofuzzy_end here?
for i in range(self._start.position,
self._end.position + self._end.extension):
yield i
def _shift(self, offset):
"""Returns a copy of the location shifted by the offset (PRIVATE)."""
return FeatureLocation(start = self._start._shift(offset),
end = self._end._shift(offset))
def _flip(self, length):
"""Returns a copy of the location after the parent is reversed (PRIVATE)."""
#Note this will flip the start and end too!
return FeatureLocation(start = self._end._flip(length),
end = self._start._flip(length))
start = property(fget= lambda self : self._start,
doc="Start location (possibly a fuzzy position, read only).")
end = property(fget= lambda self : self._end,
doc="End location (possibly a fuzzy position, read only).")
nofuzzy_start = property(
fget=lambda self: self._start.position,
doc="""Start position (integer, approximated if fuzzy, read only).
To get non-fuzzy attributes (ie. the position only) ask for
'location.nofuzzy_start', 'location.nofuzzy_end'. These should return
the largest range of the fuzzy position. So something like:
(10.20)..(30.40) should return 10 for start, and 40 for end.
""")
nofuzzy_end = property(
fget=lambda self: self._end.position + self._end.extension,
doc="""End position (integer, approximated if fuzzy, read only).
To get non-fuzzy attributes (ie. the position only) ask for
'location.nofuzzy_start', 'location.nofuzzy_end'. These should return
the largest range of the fuzzy position. So something like:
(10.20)..(30.40) should return 10 for start, and 40 for end.
""")
class AbstractPosition(object):
"""Abstract base class representing a position.
"""
def __init__(self, position, extension):
self.position = position
assert extension >= 0, extension
self.extension = extension
def __repr__(self):
"""String representation of the location for debugging."""
return "%s(%s,%s)" % (self.__class__.__name__, \
repr(self.position), repr(self.extension))
def __hash__(self):
"""Simple position based hash."""
#Note __hash__ must be implemented on Python 3.x if overriding __eq__
return hash(self.position)
def __eq__(self, other):
"""A simple equality for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return self.position == other.position
def __ne__(self, other):
"""A simple non-equality for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return self.position != other.position
def __le__(self, other):
"""A simple less than or equal for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return self.position <= other.position
def __lt__(self, other):
"""A simple less than or equal for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return self.position < other.position
def __ge__(self, other):
"""A simple less than or equal for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return self.position >= other.position
def __gt__(self, other):
"""A simple less than or equal for positions.
This is very simple-minded and just compares the position attribute
of the features; extensions are not considered at all. This could
potentially be expanded to try to take advantage of extensions.
"""
assert isinstance(other, AbstractPosition), \
"We can only do comparisons between Biopython Position objects."
return self.position > other.position
def _shift(self, offset):
#We want this to maintain the subclass when called from a subclass
return self.__class__(self.position + offset, self.extension)
def _flip(self, length):
#We want this to maintain the subclass when called from a subclass
return self.__class__(length - self.position - self.extension,
self.extension)
class ExactPosition(AbstractPosition):
"""Specify the specific position of a boundary.
o position - The position of the boundary.
o extension - An optional argument which must be zero since we don't
have an extension. The argument is provided so that the same number of
arguments can be passed to all position types.
In this case, there is no fuzziness associated with the position.
"""
def __init__(self, position, extension = 0):
if extension != 0:
raise AttributeError("Non-zero extension %s for exact position."
% extension)
AbstractPosition.__init__(self, position, 0)
def __repr__(self):
"""String representation of the ExactPosition location for debugging."""
assert self.extension == 0
return "%s(%s)" % (self.__class__.__name__, repr(self.position))
def __str__(self):
return str(self.position)
class UncertainPosition(ExactPosition):
"""Specify a specific position which is uncertain.
This is used in UniProt, e.g. ?222 for uncertain position 222, or in the
XML format explicitly marked as uncertain. Does not apply to GenBank/EMBL.
"""
pass
class UnknownPosition(AbstractPosition):
"""Specify a specific position which is unknown (has no position).
This is used in UniProt, e.g. ? or in the XML as unknown.
"""
def __init__(self):
self.position = None
self.extension = None
pass
def __repr__(self):
"""String representation of the UnknownPosition location for debugging."""
return "%s()" % self.__class__.__name__
class WithinPosition(AbstractPosition):
"""Specify the position of a boundary within some coordinates.
Arguments:
o position - The start position of the boundary
o extension - The range to which the boundary can extend.
This allows dealing with a position like ((1.4)..100). This
indicates that the start of the sequence is somewhere between 1
and 4. To represent that with this class we would set position as
1 and extension as 3.
"""
def __init__(self, position, extension = 0):
AbstractPosition.__init__(self, position, extension)
def __str__(self):
return "(%s.%s)" % (self.position, self.position + self.extension)
class BetweenPosition(AbstractPosition):
"""Specify the position of a boundary between two coordinates (OBSOLETE?).
Arguments:
o position - The start position of the boundary.
o extension - The range to the other position of a boundary.
This specifies a coordinate which is found between the two positions.
So this allows us to deal with a position like ((1^2)..100). To
represent that with this class we set position as 1 and the
extension as 1.
"""
def __init__(self, position, extension = 0):
AbstractPosition.__init__(self, position, extension)
def __str__(self):
return "(%s^%s)" % (self.position, self.position + self.extension)
class BeforePosition(AbstractPosition):
"""Specify a position where the actual location occurs before it.
Arguments:
o position - The upper boundary of where the location can occur.
o extension - An optional argument which must be zero since we don't
have an extension. The argument is provided so that the same number of
arguments can be passed to all position types.
This is used to specify positions like (<10..100) where the location
occurs somewhere before position 10.
"""
def __init__(self, position, extension = 0):
if extension != 0:
raise AttributeError("Non-zero extension %s for exact position."
% extension)
AbstractPosition.__init__(self, position, 0)
def __repr__(self):
"""A string representation of the location for debugging."""
assert self.extension == 0
return "%s(%s)" % (self.__class__.__name__, repr(self.position))
def __str__(self):
return "<%s" % self.position
def _flip(self, length):
return AfterPosition(length - self.position)
class AfterPosition(AbstractPosition):
"""Specify a position where the actual location is found after it.
Arguments:
o position - The lower boundary of where the location can occur.
o extension - An optional argument which must be zero since we don't
have an extension. The argument is provided so that the same number of
arguments can be passed to all position types.
This is used to specify positions like (>10..100) where the location
occurs somewhere after position 10.
"""
def __init__(self, position, extension = 0):
if extension != 0:
raise AttributeError("Non-zero extension %s for exact position."
% extension)
AbstractPosition.__init__(self, position, 0)
def __repr__(self):
"""A string representation of the location for debugging."""
assert self.extension == 0
return "%s(%s)" % (self.__class__.__name__, repr(self.position))
def __str__(self):
return ">%s" % self.position
def _flip(self, length):
return BeforePosition(length - self.position)
class OneOfPosition(AbstractPosition):
"""Specify a position where the location can be multiple positions.
This models the GenBank 'one-of(1888,1901)' function, and tries
to make this fit within the Biopython Position models. In our case
the position of the "one-of" is set as the lowest choice, and the
extension is the range to the highest choice.
"""
def __init__(self, position_list):
"""Initialize with a set of posssible positions.
position_list is a list of AbstractPosition derived objects,
specifying possible locations.
"""
# unique attribute for this type of positions
self.position_choices = position_list
# find the smallest and largest position in the choices
smallest = None
largest = None
for position_choice in self.position_choices:
assert isinstance(position_choice, AbstractPosition), \
"Expected position objects, got %r" % position_choice
if smallest is None and largest is None:
smallest = position_choice.position
largest = position_choice.position
elif position_choice.position > largest:
largest = position_choice.position
elif position_choice.position < smallest:
smallest = position_choice.position
# initialize with our definition of position and extension
AbstractPosition.__init__(self, smallest, largest - smallest)
def __repr__(self):
"""String representation of the OneOfPosition location for debugging."""
return "%s(%s)" % (self.__class__.__name__, \
repr(self.position_choices))
def __str__(self):
out = "one-of("
for position in self.position_choices:
out += "%s," % position
# replace the last comma with the closing parenthesis
out = out[:-1] + ")"
return out
def _shift(self, offset):
return self.__class__([position_choice._shift(offset) \
for position_choice in self.position_choices])
def _flip(self, length):
return OneOfPosition([p._flip(length) for p in self.position_choices[::-1]])
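# Illustrative sketch (not part of the original module): a OneOfPosition
# built from two exact choices records the smallest choice as its position
# and the spread as its extension, and prints in GenBank's one-of() form.
#
#   p = OneOfPosition([ExactPosition(1888), ExactPosition(1901)])
#   p.position, p.extension      # -> (1888, 13)
#   str(p)                       # -> "one-of(1888,1901)"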
class PositionGap(object):
"""Simple class to hold information about a gap between positions.
"""
def __init__(self, gap_size):
"""Intialize with a position object containing the gap information.
"""
self.gap_size = gap_size
def __repr__(self):
"""A string representation of the position gap for debugging."""
return "%s(%s)" % (self.__class__.__name__, repr(self.gap_size))
def __str__(self):
out = "gap(%s)" % self.gap_size
return out
def _test():
"""Run the Bio.SeqFeature module's doctests (PRIVATE).
This will try and locate the unit tests directory, and run the doctests
from there in order that the relative paths used in the examples work.
"""
import doctest
import os
if os.path.isdir(os.path.join("..","Tests")):
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("..","Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
elif os.path.isdir(os.path.join("Tests")) :
print "Runing doctests..."
cur_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join("Tests"))
doctest.testmod()
os.chdir(cur_dir)
del cur_dir
print "Done"
if __name__ == "__main__":
_test()
|
asherkhb/coge
|
bin/last_wrapper/Bio/SeqFeature.py
|
Python
|
bsd-2-clause
| 38,411
|
[
"Biopython"
] |
5b73f5f02d3e19ce4cce0732fb7e4b84d82b50b65cbcf02d130651622f6c53fb
|
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
Geometry module:
This module contains classes to hold geometry-specific data and operations,
including any additional constraints. All geometry classes must implement
pad(), unpad() and get_size() methods.
1. Bulk: Data and operations for 3D bulk structures
2. Sheet: Data and operations for 2D sheet structures
3. Wire: Data and operations for 1D wire structures
4. Cluster: Data and operations for 0D cluster structures
"""
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import Site
import numpy as np
class Bulk(object):
'''
Contains data and operations specific to bulk structures (so not much...).
'''
def __init__(self):
'''
Makes a Bulk object.
'''
self.shape = 'bulk'
self.max_size = np.inf
self.min_size = -np.inf
self.padding = None
def pad(self, cell, padding='from_geometry'):
'''
Does nothing.
Args:
cell: the Cell to pad
padding: the amount of vacuum padding to add. If set to
'from_geometry', then the value in self.padding is used.
'''
pass
def unpad(self, cell, constraints):
'''
Does nothing.
Args:
cell: the Cell to unpad
constraints: the Constraints of the search
'''
pass
def get_size(self, cell):
'''
Returns 0.
Args:
cell: the Cell whose size to get
'''
return 0
class Sheet(object):
'''
Contains data and operations specific to sheet structures.
'''
def __init__(self, geometry_parameters):
'''
Makes a Sheet, and sets default parameter values if necessary.
Args:
geometry_parameters: a dictionary of parameters
'''
self.shape = 'sheet'
# default values
self.default_max_size = np.inf
self.default_min_size = -np.inf
self.default_padding = 10
# parse the parameters, and set defaults if necessary
# max size
if 'max_size' not in geometry_parameters:
self.max_size = self.default_max_size
elif geometry_parameters['max_size'] in (None, 'default'):
self.max_size = self.default_max_size
else:
self.max_size = geometry_parameters['max_size']
# min size
if 'min_size' not in geometry_parameters:
self.min_size = self.default_min_size
elif geometry_parameters['min_size'] in (None, 'default'):
self.min_size = self.default_min_size
else:
self.min_size = geometry_parameters['min_size']
# padding
if 'padding' not in geometry_parameters:
self.padding = self.default_padding
elif geometry_parameters['padding'] in (None, 'default'):
self.padding = self.default_padding
else:
self.padding = geometry_parameters['padding']
def pad(self, cell, padding='from_geometry'):
'''
Modifies a cell by adding vertical vacuum padding and making the
c-lattice vector normal to the plane of the sheet. The atoms are
shifted to the center of the padded sheet.
Args:
cell: the Cell to pad
padding: the amount of vacuum padding to add (in Angstroms). If not
set, then the value in self.padding is used.
'''
# get the padding amount
if padding == 'from_geometry':
pad_amount = self.padding
else:
pad_amount = padding
# make the padded lattice
cell.rotate_to_principal_directions()
species = cell.species
cartesian_coords = cell.cart_coords
cart_bounds = cell.get_bounding_box(cart_coords=True)
minz = cart_bounds[2][0]
maxz = cart_bounds[2][1]
layer_thickness = maxz - minz
ax = cell.lattice.matrix[0][0]
bx = cell.lattice.matrix[1][0]
by = cell.lattice.matrix[1][1]
padded_lattice = Lattice([[ax, 0.0, 0.0], [bx, by, 0.0],
[0.0, 0.0, layer_thickness + pad_amount]])
# modify the cell to correspond to the padded lattice
cell.modify_lattice(padded_lattice)
site_indices = []
for i in range(len(cell.sites)):
site_indices.append(i)
cell.remove_sites(site_indices)
for i in range(len(cartesian_coords)):
cell.append(species[i], cartesian_coords[i],
coords_are_cartesian=True)
# translate the atoms back into the cell if needed, and shift them to
# the vertical center
cell.translate_atoms_into_cell()
frac_bounds = cell.get_bounding_box(cart_coords=False)
z_center = frac_bounds[2][0] + (frac_bounds[2][1] -
frac_bounds[2][0])/2
translation_vector = [0, 0, 0.5 - z_center]
site_indices = [i for i in range(len(cell.sites))]
cell.translate_sites(site_indices, translation_vector,
frac_coords=True, to_unit_cell=False)
def unpad(self, cell, constraints):
'''
Modifies a cell by removing vertical vacuum padding, leaving only
enough to satisfy the per-species MID constraints, and makes the
c-lattice vector normal to the plane of the sheet (if it isn't
already).
Args:
cell: the Cell to unpad
constraints: the Constraints of the search
'''
# make the unpadded lattice
cell.rotate_to_principal_directions()
species = cell.species
cartesian_coords = cell.cart_coords
layer_thickness = self.get_size(cell)
max_mid = constraints.get_max_mid() + 0.01 # just to be safe...
ax = cell.lattice.matrix[0][0]
bx = cell.lattice.matrix[1][0]
by = cell.lattice.matrix[1][1]
unpadded_lattice = Lattice([[ax, 0.0, 0.0], [bx, by, 0.0],
[0.0, 0.0, layer_thickness + max_mid]])
# modify the cell to correspond to the unpadded lattice
cell.modify_lattice(unpadded_lattice)
site_indices = []
for i in range(len(cell.sites)):
site_indices.append(i)
cell.remove_sites(site_indices)
for i in range(len(cartesian_coords)):
cell.append(species[i], cartesian_coords[i],
coords_are_cartesian=True)
# translate the atoms back into the cell if needed, and shift them to
# the vertical center
cell.translate_atoms_into_cell()
frac_bounds = cell.get_bounding_box(cart_coords=False)
z_center = frac_bounds[2][0] + (frac_bounds[2][1] -
frac_bounds[2][0])/2
translation_vector = [0, 0, 0.5 - z_center]
site_indices = [i for i in range(len(cell.sites))]
cell.translate_sites(site_indices, translation_vector,
frac_coords=True, to_unit_cell=False)
def get_size(self, cell):
'''
Returns the layer thickness of a sheet structure, which is the maximum
vertical distance between atoms in the cell.
Precondition: the cell has already been put into sheet format (c
lattice vector parallel to the z-axis and a and b lattice vectors
in the x-y plane)
Args:
cell: the Cell whose size to get
'''
cart_bounds = cell.get_bounding_box(cart_coords=True)
layer_thickness = cart_bounds[2][1] - cart_bounds[2][0]
return layer_thickness
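# Illustrative usage sketch (not part of the module). `cell` and
# `constraints` stand for hypothetical Cell and Constraints objects from the
# surrounding search package; the parameter values are arbitrary examples.
#
#   sheet = Sheet({'max_size': 25, 'min_size': 1, 'padding': 15})
#   sheet.pad(cell)                    # pad the c vector with 15 Angstroms of vacuum
#   thickness = sheet.get_size(cell)   # layer thickness in Angstroms
#   sheet.unpad(cell, constraints)     # shrink the vacuum back to the MID limit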
class Wire(object):
'''
Contains data and operations specific to wire structures.
'''
def __init__(self, geometry_parameters):
'''
Makes a Wire, and sets default parameter values if necessary.
Args:
geometry_parameters: a dictionary of parameters
'''
self.shape = 'wire'
# default values
self.default_max_size = np.inf
self.default_min_size = -np.inf
self.default_padding = 10
# parse the parameters, and set defaults if necessary
# max size
if 'max_size' not in geometry_parameters:
self.max_size = self.default_max_size
elif geometry_parameters['max_size'] in (None, 'default'):
self.max_size = self.default_max_size
else:
self.max_size = geometry_parameters['max_size']
# min size
if 'min_size' not in geometry_parameters:
self.min_size = self.default_min_size
elif geometry_parameters['min_size'] in (None, 'default'):
self.min_size = self.default_min_size
else:
self.min_size = geometry_parameters['min_size']
# padding
if 'padding' not in geometry_parameters:
self.padding = self.default_padding
elif geometry_parameters['padding'] in (None, 'default'):
self.padding = self.default_padding
else:
self.padding = geometry_parameters['padding']
def pad(self, cell, padding='from_geometry'):
'''
Modifies a cell by making the c lattice vector parallel to z-axis, and
adds vacuum padding around the structure in the x and y directions by
replacing a and b lattice vectors with padded vectors along the x and y
axes, respectively. The atoms are shifted to the center of the padded
cell.
Args:
cell: the Cell to pad
padding: the amount of vacuum padding to add (in Angstroms). If not
set, then the value in self.padding is used.
'''
# get the padding amount
if padding == 'from_geometry':
pad_amount = self.padding
else:
pad_amount = padding
# make the padded lattice
cell.rotate_c_parallel_to_z()
species = cell.species
cartesian_coords = cell.cart_coords
cart_bounds = cell.get_bounding_box(cart_coords=True)
x_min = cart_bounds[0][0]
x_max = cart_bounds[0][1]
y_min = cart_bounds[1][0]
y_max = cart_bounds[1][1]
x_extent = x_max - x_min
y_extent = y_max - y_min
cz = cell.lattice.matrix[2][2]
padded_lattice = Lattice([[x_extent + pad_amount, 0, 0],
[0, y_extent + pad_amount, 0], [0, 0, cz]])
# modify the cell to correspond to the padded lattice
cell.modify_lattice(padded_lattice)
site_indices = []
for i in range(len(cell.sites)):
site_indices.append(i)
cell.remove_sites(site_indices)
for i in range(len(cartesian_coords)):
cell.append(species[i], cartesian_coords[i],
coords_are_cartesian=True)
# translate the atoms back into the cell if needed, and shift them to
# the horizontal center
cell.translate_atoms_into_cell()
frac_bounds = cell.get_bounding_box(cart_coords=False)
x_center = frac_bounds[0][0] + (frac_bounds[0][1] -
frac_bounds[0][0])/2
y_center = frac_bounds[1][0] + (frac_bounds[1][1] -
frac_bounds[1][0])/2
translation_vector = [0.5 - x_center, 0.5 - y_center, 0.0]
site_indices = [i for i in range(len(cell.sites))]
cell.translate_sites(site_indices, translation_vector,
frac_coords=True, to_unit_cell=False)
def unpad(self, cell, constraints):
'''
Modifies a cell by removing horizontal vacuum padding around a wire,
leaving only enough to satisfy the per-species MID constraints, and
makes the three lattice vectors lie along the three Cartesian
directions.
Args:
cell: the Cell to unpad
constraints: the Constraints of the search
'''
# make the unpadded lattice
cell.rotate_c_parallel_to_z()
species = cell.species
cartesian_coords = cell.cart_coords
cart_bounds = cell.get_bounding_box(cart_coords=True)
x_min = cart_bounds[0][0]
x_max = cart_bounds[0][1]
y_min = cart_bounds[1][0]
y_max = cart_bounds[1][1]
x_extent = x_max - x_min
y_extent = y_max - y_min
cz = cell.lattice.matrix[2][2]
max_mid = constraints.get_max_mid() + 0.01 # just to be safe...
unpadded_lattice = Lattice([[x_extent + max_mid, 0.0, 0.0],
[0, y_extent + max_mid, 0.0],
[0.0, 0.0, cz]])
# modify the cell to correspond to the unpadded lattice
cell.modify_lattice(unpadded_lattice)
site_indices = []
for i in range(len(cell.sites)):
site_indices.append(i)
cell.remove_sites(site_indices)
for i in range(len(cartesian_coords)):
cell.append(species[i], cartesian_coords[i],
coords_are_cartesian=True)
# translate the atoms back into the cell if needed, and shift them to
# the horizontal center
cell.translate_atoms_into_cell()
frac_bounds = cell.get_bounding_box(cart_coords=False)
x_center = frac_bounds[0][0] + (frac_bounds[0][1] -
frac_bounds[0][0])/2
y_center = frac_bounds[1][0] + (frac_bounds[1][1] -
frac_bounds[1][0])/2
translation_vector = [0.5 - x_center, 0.5 - y_center, 0.0]
site_indices = [i for i in range(len(cell.sites))]
cell.translate_sites(site_indices, translation_vector,
frac_coords=True, to_unit_cell=False)
def get_size(self, cell):
'''
Returns the diameter of a wire structure, defined as the maximum
distance between atoms projected to the x-y plane.
Precondition: the cell has already been put into wire format (the c
lattice vector is parallel to the z-axis and the a and b lattice
vectors lie in the x-y plane), and all sites are located inside the
cell (i.e., have fractional coordinates between 0 and 1).
Args:
cell: the Cell whose size to get
'''
max_distance = 0
for site_i in cell.sites:
# make Site versions of each PeriodicSite so that the computed
# distance won't include periodic images
non_periodic_site_i = Site(site_i.species_and_occu,
[site_i.coords[0], site_i.coords[1],
0.0])
for site_j in cell.sites:
non_periodic_site_j = Site(site_j.species_and_occu,
[site_j.coords[0], site_j.coords[1],
0.0])
distance = non_periodic_site_i.distance(non_periodic_site_j)
if distance > max_distance:
max_distance = distance
return max_distance
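Not part of the original module: a minimal numpy sketch of the projected-diameter calculation that Wire.get_size performs, assuming the Cartesian coordinates are available as a plain (N, 3) array (GASP's Cell object is not needed for the illustration).
import numpy as np
def projected_diameter(cart_coords):
    """Largest pairwise distance after projecting the atoms onto the x-y plane."""
    xy = np.asarray(cart_coords, dtype=float)[:, :2]
    if len(xy) < 2:
        return 0.0
    # Pairwise difference tensor via broadcasting; the largest norm is the diameter.
    diff = xy[:, None, :] - xy[None, :, :]
    return float(np.sqrt((diff ** 2).sum(axis=-1)).max())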
class Cluster(object):
'''
Contains data and operations specific to clusters.
'''
def __init__(self, geometry_parameters):
'''
Makes a Cluster, and sets default parameter values if necessary.
Args:
geometry_parameters: a dictionary of parameters
'''
self.shape = 'cluster'
# default values
self.default_max_size = np.inf
self.default_min_size = -np.inf
self.default_padding = 10
# parse the parameters, and set defaults if necessary
# max size
if 'max_size' not in geometry_parameters:
self.max_size = self.default_max_size
elif geometry_parameters['max_size'] in (None, 'default'):
self.max_size = self.default_max_size
else:
self.max_size = geometry_parameters['max_size']
# min size
if 'min_size' not in geometry_parameters:
self.min_size = self.default_min_size
elif geometry_parameters['min_size'] in (None, 'default'):
self.min_size = self.default_min_size
else:
self.min_size = geometry_parameters['min_size']
# padding
if 'padding' not in geometry_parameters:
self.padding = self.default_padding
elif geometry_parameters['padding'] in (None, 'default'):
self.padding = self.default_padding
else:
self.padding = geometry_parameters['padding']
def pad(self, cell, padding='from_geometry'):
'''
Modifies a cell by replacing the three lattice vectors with ones along
the three Cartesian directions and adding vacuum padding to each one.
The atoms are shifted to the center of the padded cell.
Args:
cell: the Cell to pad
padding: the amount of vacuum padding to add (in Angstroms). If not
set, then the value in self.padding is used.
'''
# get the padding amount
if padding == 'from_geometry':
pad_amount = self.padding
else:
pad_amount = padding
# make the padded lattice
species = cell.species
cartesian_coords = cell.cart_coords
cart_bounds = cell.get_bounding_box(cart_coords=True)
x_min = cart_bounds[0][0]
x_max = cart_bounds[0][1]
y_min = cart_bounds[1][0]
y_max = cart_bounds[1][1]
z_min = cart_bounds[2][0]
z_max = cart_bounds[2][1]
x_extent = x_max - x_min
y_extent = y_max - y_min
z_extent = z_max - z_min
padded_lattice = Lattice([[x_extent + pad_amount, 0, 0],
[0, y_extent + pad_amount, 0],
[0, 0, z_extent + pad_amount]])
# modify the cell to correspond to the padded lattice
cell.modify_lattice(padded_lattice)
site_indices = []
for i in range(len(cell.sites)):
site_indices.append(i)
cell.remove_sites(site_indices)
for i in range(len(cartesian_coords)):
cell.append(species[i], cartesian_coords[i],
coords_are_cartesian=True)
# translate the atoms back into the cell if needed, and shift them to
# the center
cell.translate_atoms_into_cell()
frac_bounds = cell.get_bounding_box(cart_coords=False)
x_center = frac_bounds[0][0] + (frac_bounds[0][1] -
frac_bounds[0][0])/2
y_center = frac_bounds[1][0] + (frac_bounds[1][1] -
frac_bounds[1][0])/2
z_center = frac_bounds[2][0] + (frac_bounds[2][1] -
frac_bounds[2][0])/2
translation_vector = [0.5 - x_center, 0.5 - y_center, 0.5 - z_center]
site_indices = [i for i in range(len(cell.sites))]
cell.translate_sites(site_indices, translation_vector,
frac_coords=True, to_unit_cell=False)
def unpad(self, cell, constraints):
'''
Modifies a cell by removing vacuum padding in every direction, leaving
only enough to satisfy the per-species MID constraints, and makes the
three lattice vectors lie along the three Cartesian directions.
Args:
cell: the Cell to unpad
constraints: the Constraints of the search
'''
# make the unpadded lattice
species = cell.species
cartesian_coords = cell.cart_coords
cart_bounds = cell.get_bounding_box(cart_coords=True)
x_min = cart_bounds[0][0]
x_max = cart_bounds[0][1]
y_min = cart_bounds[1][0]
y_max = cart_bounds[1][1]
z_min = cart_bounds[2][0]
z_max = cart_bounds[2][1]
x_extent = x_max - x_min
y_extent = y_max - y_min
z_extent = z_max - z_min
max_mid = constraints.get_max_mid() + 0.01 # just to be safe...
unpadded_lattice = Lattice([[x_extent + max_mid, 0.0, 0.0],
[0, y_extent + max_mid, 0.0],
[0.0, 0.0, z_extent + max_mid]])
# modify the cell to correspond to the unpadded lattice
cell.modify_lattice(unpadded_lattice)
site_indices = []
for i in range(len(cell.sites)):
site_indices.append(i)
cell.remove_sites(site_indices)
for i in range(len(cartesian_coords)):
cell.append(species[i], cartesian_coords[i],
coords_are_cartesian=True)
# translate the atoms back into the cell if needed, and shift them to
# the center
cell.translate_atoms_into_cell()
frac_bounds = cell.get_bounding_box(cart_coords=False)
x_center = frac_bounds[0][0] + (frac_bounds[0][1] -
frac_bounds[0][0])/2
y_center = frac_bounds[1][0] + (frac_bounds[1][1] -
frac_bounds[1][0])/2
z_center = frac_bounds[2][0] + (frac_bounds[2][1] -
frac_bounds[2][0])/2
translation_vector = [0.5 - x_center, 0.5 - y_center, 0.5 - z_center]
site_indices = [i for i in range(len(cell.sites))]
cell.translate_sites(site_indices, translation_vector,
frac_coords=True, to_unit_cell=False)
def get_size(self, cell):
'''
Returns the diameter of a cluster structure, defined as the maximum
distance between atoms in the cell.
Precondition: all sites are located inside the cell (i.e., have
fractional coordinates between 0 and 1)
Args:
cell: the Cell whose size to get
'''
max_distance = 0
for site_i in cell.sites:
# make Site versions of each PeriodicSite so that the computed
# distance won't include periodic images
non_periodic_site_i = Site(site_i.species_and_occu, site_i.coords)
for site_j in cell.sites:
non_periodic_site_j = Site(site_j.species_and_occu,
site_j.coords)
distance = non_periodic_site_i.distance(non_periodic_site_j)
if distance > max_distance:
max_distance = distance
return max_distance
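For comparison only, not taken from the original file: the double loop in Cluster.get_size can also be expressed with scipy's pairwise-distance helper, again assuming a plain (N, 3) Cartesian coordinate array; up to floating-point error it yields the same diameter.
import numpy as np
from scipy.spatial.distance import pdist
def cluster_diameter(cart_coords):
    """Maximum pairwise interatomic distance, as computed by Cluster.get_size."""
    coords = np.asarray(cart_coords, dtype=float)
    if len(coords) < 2:
        return 0.0
    return float(pdist(coords).max())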
|
henniggroup/GASP-python
|
gasp/geometry.py
|
Python
|
mit
| 22,771
|
[
"pymatgen"
] |
a4b2164b8bce0c581d7197f63cec61455bb8d6323c8384629bbfb910cf738b96
|
# -*- coding: utf-8 -*-
"""Test functions for probability distributions.
"""
# Author: Taku Yoshioka
# License: MIT
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_allclose
from scipy import stats
from scipy.special import gamma
from bmlingam.prob import ll_laplace, ll_gg, sample_gg
def test_laplace_gg(plot=False):
"""Check if the outputs of ll_laplace() and ll_gg(, beta=0.5).
Outputs should be equivalent up to numerical error.
"""
xs = np.arange(-10., 10., .2)
out1 = ll_laplace(xs)
out2 = ll_gg(xs, beta=.5)
if plot:
plt.plot(xs, out1, 'b')
plt.plot(xs, out2, 'g')
def _describe_and_check(txt, xs, ss):
d = stats.describe(xs)
print(txt)
print('Mean: {}'.format(d.mean))
print('Var : {}'.format(d.variance))
print('Skew: {}'.format(d.skewness))
print('Kurt: {}'.format(d.kurtosis))
assert_allclose([d.mean, d.variance, d.skewness, d.kurtosis],
ss, rtol=5e-2, atol=5e-2)
def _mv_kurtosis(xs):
dim = xs.shape[1]
prec = np.linalg.pinv(np.cov(xs.T))
xs_ = xs - xs.mean(axis=0)
print(xs_.shape, prec.shape)
xpx = np.sum((xs_.dot(prec)) * xs_, axis=1)
k = np.mean(xpx**2) - dim * (dim + 2)
print('Mv kurtosis: {}'.format(k))
return k
def test_sample_gg(n_samples=1000000, plot=False):
"""Tests for generalized Gaussian.
"""
rng = np.random.RandomState(0)
# Test 1
print('Test1')
dim = 2
scov = np.eye(dim)
beta = 1.0
xs = sample_gg(scov, beta, n_samples, rng, dim, normalize=True)
_describe_and_check('xs[:, 0]', xs[:, 0], [0, 1, 0, 0])
_describe_and_check('xs[:, 1]', xs[:, 1], [0, 1, 0, 0])
# Test 2
print('\nTest2')
dim = 2
scov = np.array([[1.0, 0.5], [0.5, 1.0]])
beta = 1.0
xs = sample_gg(scov, beta, n_samples, rng, dim, normalize=True)
_describe_and_check('xs[:, 0]', xs[:, 0], [0, 1, 0, 0])
_describe_and_check('xs[:, 1]', xs[:, 1], [0, 1, 0, 0])
# Test 3
print('\nTest3')
dim = 1
scov = np.eye(dim)
beta = 0.5
xs = sample_gg(scov, beta, n_samples, rng, dim, normalize=True)
_describe_and_check('xs', xs.ravel(), [0, 1, 0, 3])
# Test 4
print('\nTest4')
dim = 2
scov = np.eye(dim)
beta = 0.5
xs = sample_gg(scov, beta, n_samples, rng, dim, normalize=True)
k = _mv_kurtosis(xs)
k_true = ((dim**2) * gamma(dim / (2 * beta)) * gamma((dim + 4) / (2 * beta))) / \
(gamma((dim + 2) / (2 * beta))**2) - dim * (dim + 2)
print('True: {}\n'.format(k_true))
assert_allclose(k, k_true, atol=5e-2, rtol=5e-2)
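As an illustration, not part of the test module: the closed-form value that Test 4 compares against can be evaluated directly. With dim = 2 and beta = 0.5 the expression reduces to 4*gamma(2)*gamma(6)/gamma(4)**2 - 8 = 480/36 - 8, roughly 5.33.
from scipy.special import gamma
def gg_excess_kurtosis(dim, beta):
    """Closed-form k_true as used in test_sample_gg."""
    num = (dim ** 2) * gamma(dim / (2 * beta)) * gamma((dim + 4) / (2 * beta))
    den = gamma((dim + 2) / (2 * beta)) ** 2
    return num / den - dim * (dim + 2)
print(gg_excess_kurtosis(2, 0.5))  # ~5.333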
|
taku-y/bmlingam
|
bmlingam/tests/test_prob.py
|
Python
|
mit
| 2,731
|
[
"Gaussian"
] |
d1555309c5e655c1bb9426d17671ee56db240aac2acb6e909e3af13992398a6d
|
"""Provides all the data related to text."""
__all__ = ["WORDLIST"]
WORDLIST = [
"abandon",
"ability",
"able",
"about",
"above",
"absent",
"absorb",
"abstract",
"absurd",
"abuse",
"access",
"accident",
"account",
"accuse",
"achieve",
"acid",
"acoustic",
"acquire",
"across",
"act",
"action",
"actor",
"actress",
"actual",
"adapt",
"add",
"addict",
"address",
"adjust",
"admit",
"adult",
"advance",
"advice",
"aerobic",
"affair",
"afford",
"afraid",
"again",
"age",
"agent",
"agree",
"ahead",
"aim",
"air",
"airport",
"aisle",
"alarm",
"album",
"alcohol",
"alert",
"alien",
"all",
"alley",
"allow",
"almost",
"alone",
"alpha",
"already",
"also",
"alter",
"always",
"amateur",
"amazing",
"among",
"amount",
"amused",
"analyst",
"anchor",
"ancient",
"anger",
"angle",
"angry",
"animal",
"ankle",
"announce",
"annual",
"another",
"answer",
"antenna",
"antique",
"anxiety",
"any",
"apart",
"apology",
"appear",
"apple",
"approve",
"april",
"arch",
"arctic",
"area",
"arena",
"argue",
"arm",
"armed",
"armor",
"army",
"around",
"arrange",
"arrest",
"arrive",
"arrow",
"art",
"artefact",
"artist",
"artwork",
"ask",
"aspect",
"assault",
"asset",
"assist",
"assume",
"asthma",
"athlete",
"atom",
"attack",
"attend",
"attitude",
"attract",
"auction",
"audit",
"august",
"aunt",
"author",
"auto",
"autumn",
"average",
"avocado",
"avoid",
"awake",
"aware",
"away",
"awesome",
"awful",
"awkward",
"axis",
"baby",
"bachelor",
"bacon",
"badge",
"bag",
"balance",
"balcony",
"ball",
"bamboo",
"banana",
"banner",
"bar",
"barely",
"bargain",
"barrel",
"base",
"basic",
"basket",
"battle",
"beach",
"bean",
"beauty",
"because",
"become",
"beef",
"before",
"begin",
"behave",
"behind",
"believe",
"below",
"belt",
"bench",
"benefit",
"best",
"betray",
"better",
"between",
"beyond",
"bicycle",
"bid",
"bike",
"bind",
"biology",
"bird",
"birth",
"bitter",
"black",
"blade",
"blame",
"blanket",
"blast",
"bleak",
"bless",
"blind",
"blood",
"blossom",
"blouse",
"blue",
"blur",
"blush",
"board",
"boat",
"body",
"boil",
"bomb",
"bone",
"bonus",
"book",
"boost",
"border",
"boring",
"borrow",
"boss",
"bottom",
"bounce",
"box",
"boy",
"bracket",
"brain",
"brand",
"brass",
"brave",
"bread",
"breeze",
"brick",
"bridge",
"brief",
"bright",
"bring",
"brisk",
"broccoli",
"broken",
"bronze",
"broom",
"brother",
"brown",
"brush",
"bubble",
"buddy",
"budget",
"buffalo",
"build",
"bulb",
"bulk",
"bullet",
"bundle",
"bunker",
"burden",
"burger",
"burst",
"bus",
"business",
"busy",
"butter",
"buyer",
"buzz",
"cabbage",
"cabin",
"cable",
"cactus",
"cage",
"cake",
"call",
"calm",
"camera",
"camp",
"can",
"canal",
"cancel",
"candy",
"cannon",
"canoe",
"canvas",
"canyon",
"capable",
"capital",
"captain",
"car",
"carbon",
"card",
"cargo",
"carpet",
"carry",
"cart",
"case",
"cash",
"casino",
"castle",
"casual",
"cat",
"catalog",
"catch",
"category",
"cattle",
"caught",
"cause",
"caution",
"cave",
"ceiling",
"celery",
"cement",
"census",
"century",
"cereal",
"certain",
"chair",
"chalk",
"champion",
"change",
"chaos",
"chapter",
"charge",
"chase",
"chat",
"cheap",
"check",
"cheese",
"chef",
"cherry",
"chest",
"chicken",
"chief",
"child",
"chimney",
"choice",
"choose",
"chronic",
"chuckle",
"chunk",
"churn",
"cigar",
"cinnamon",
"circle",
"citizen",
"city",
"civil",
"claim",
"clap",
"clarify",
"claw",
"clay",
"clean",
"clerk",
"clever",
"click",
"client",
"cliff",
"climb",
"clinic",
"clip",
"clock",
"clog",
"close",
"cloth",
"cloud",
"clown",
"club",
"clump",
"cluster",
"clutch",
"coach",
"coast",
"coconut",
"code",
"coffee",
"coil",
"coin",
"collect",
"color",
"column",
"combine",
"come",
"comfort",
"comic",
"common",
"company",
"concert",
"conduct",
"confirm",
"congress",
"connect",
"consider",
"control",
"convince",
"cook",
"cool",
"copper",
"copy",
"coral",
"core",
"corn",
"correct",
"cost",
"cotton",
"couch",
"country",
"couple",
"course",
"cousin",
"cover",
"coyote",
"crack",
"cradle",
"craft",
"cram",
"crane",
"crash",
"crater",
"crawl",
"crazy",
"cream",
"credit",
"creek",
"crew",
"cricket",
"crime",
"crisp",
"critic",
"crop",
"cross",
"crouch",
"crowd",
"crucial",
"cruel",
"cruise",
"crumble",
"crunch",
"crush",
"cry",
"crystal",
"cube",
"culture",
"cup",
"cupboard",
"curious",
"current",
"curtain",
"curve",
"cushion",
"custom",
"cute",
"cycle",
"dad",
"damage",
"damp",
"dance",
"danger",
"daring",
"dash",
"daughter",
"dawn",
"day",
"deal",
"debate",
"debris",
"decade",
"december",
"decide",
"decline",
"decorate",
"decrease",
"deer",
"defense",
"define",
"defy",
"degree",
"delay",
"deliver",
"demand",
"demise",
"denial",
"dentist",
"deny",
"depart",
"depend",
"deposit",
"depth",
"deputy",
"derive",
"describe",
"desert",
"design",
"desk",
"despair",
"destroy",
"detail",
"detect",
"develop",
"device",
"devote",
"diagram",
"dial",
"diamond",
"diary",
"dice",
"diesel",
"diet",
"differ",
"digital",
"dignity",
"dilemma",
"dinner",
"dinosaur",
"direct",
"dirt",
"disagree",
"discover",
"disease",
"dish",
"dismiss",
"disorder",
"display",
"distance",
"divert",
"divide",
"divorce",
"dizzy",
"doctor",
"document",
"dog",
"doll",
"dolphin",
"domain",
"donate",
"donkey",
"donor",
"door",
"dose",
"double",
"dove",
"draft",
"dragon",
"drama",
"drastic",
"draw",
"dream",
"dress",
"drift",
"drill",
"drink",
"drip",
"drive",
"drop",
"drum",
"dry",
"duck",
"dumb",
"dune",
"during",
"dust",
"dutch",
"duty",
"dwarf",
"dynamic",
"eager",
"eagle",
"early",
"earn",
"earth",
"easily",
"east",
"easy",
"echo",
"ecology",
"economy",
"edge",
"edit",
"educate",
"effort",
"egg",
"eight",
"either",
"elbow",
"elder",
"electric",
"elegant",
"element",
"elephant",
"elevator",
"elite",
"else",
"embark",
"embody",
"embrace",
"emerge",
"emotion",
"employ",
"empower",
"empty",
"enable",
"enact",
"end",
"endless",
"endorse",
"enemy",
"energy",
"enforce",
"engage",
"engine",
"enhance",
"enjoy",
"enlist",
"enough",
"enrich",
"enroll",
"ensure",
"enter",
"entire",
"entry",
"envelope",
"episode",
"equal",
"equip",
"era",
"erase",
"erode",
"erosion",
"error",
"erupt",
"escape",
"essay",
"essence",
"estate",
"eternal",
"ethics",
"evidence",
"evil",
"evoke",
"evolve",
"exact",
"example",
"excess",
"exchange",
"excite",
"exclude",
"excuse",
"execute",
"exercise",
"exhaust",
"exhibit",
"exile",
"exist",
"exit",
"exotic",
"expand",
"expect",
"expire",
"explain",
"expose",
"express",
"extend",
"extra",
"eye",
"eyebrow",
"fabric",
"face",
"faculty",
"fade",
"faint",
"faith",
"fall",
"false",
"fame",
"family",
"famous",
"fan",
"fancy",
"fantasy",
"farm",
"fashion",
"fat",
"fatal",
"father",
"fatigue",
"fault",
"favorite",
"feature",
"february",
"federal",
"fee",
"feed",
"feel",
"female",
"fence",
"festival",
"fetch",
"fever",
"few",
"fiber",
"fiction",
"field",
"figure",
"file",
"film",
"filter",
"final",
"find",
"fine",
"finger",
"finish",
"fire",
"firm",
"first",
"fiscal",
"fish",
"fit",
"fitness",
"fix",
"flag",
"flame",
"flash",
"flat",
"flavor",
"flee",
"flight",
"flip",
"float",
"flock",
"floor",
"flower",
"fluid",
"flush",
"fly",
"foam",
"focus",
"fog",
"foil",
"fold",
"follow",
"food",
"foot",
"force",
"forest",
"forget",
"fork",
"fortune",
"forum",
"forward",
"fossil",
"foster",
"found",
"fox",
"fragile",
"frame",
"frequent",
"fresh",
"friend",
"fringe",
"frog",
"front",
"frost",
"frown",
"frozen",
"fruit",
"fuel",
"fun",
"funny",
"furnace",
"fury",
"future",
"gadget",
"gain",
"galaxy",
"gallery",
"game",
"gap",
"garage",
"garbage",
"garden",
"garlic",
"garment",
"gas",
"gasp",
"gate",
"gather",
"gauge",
"gaze",
"general",
"genius",
"genre",
"gentle",
"genuine",
"gesture",
"ghost",
"giant",
"gift",
"giggle",
"ginger",
"giraffe",
"girl",
"give",
"glad",
"glance",
"glare",
"glass",
"glide",
"glimpse",
"globe",
"gloom",
"glory",
"glove",
"glow",
"glue",
"goat",
"goddess",
"gold",
"good",
"goose",
"gorilla",
"gospel",
"gossip",
"govern",
"gown",
"grab",
"grace",
"grain",
"grant",
"grape",
"grass",
"gravity",
"great",
"green",
"grid",
"grief",
"grit",
"grocery",
"group",
"grow",
"grunt",
"guard",
"guess",
"guide",
"guilt",
"guitar",
"gun",
"gym",
"habit",
"hair",
"half",
"hammer",
"hamster",
"hand",
"happy",
"harbor",
"hard",
"harsh",
"harvest",
"hat",
"have",
"hawk",
"hazard",
"head",
"health",
"heart",
"heavy",
"hedgehog",
"height",
"hello",
"helmet",
"help",
"hen",
"hero",
"hidden",
"high",
"hill",
"hint",
"hip",
"hire",
"history",
"hobby",
"hockey",
"hold",
"hole",
"holiday",
"hollow",
"home",
"honey",
"hood",
"hope",
"horn",
"horror",
"horse",
"hospital",
"host",
"hotel",
"hour",
"hover",
"hub",
"huge",
"human",
"humble",
"humor",
"hundred",
"hungry",
"hunt",
"hurdle",
"hurry",
"hurt",
"husband",
"hybrid",
"ice",
"icon",
"idea",
"identify",
"idle",
"ignore",
"ill",
"illegal",
"illness",
"image",
"imitate",
"immense",
"immune",
"impact",
"impose",
"improve",
"impulse",
"inch",
"include",
"income",
"increase",
"index",
"indicate",
"indoor",
"industry",
"infant",
"inflict",
"inform",
"inhale",
"inherit",
"initial",
"inject",
"injury",
"inmate",
"inner",
"innocent",
"input",
"inquiry",
"insane",
"insect",
"inside",
"inspire",
"install",
"intact",
"interest",
"into",
"invest",
"invite",
"involve",
"iron",
"island",
"isolate",
"issue",
"item",
"ivory",
"jacket",
"jaguar",
"jar",
"jazz",
"jealous",
"jeans",
"jelly",
"jewel",
"job",
"join",
"joke",
"journey",
"joy",
"judge",
"juice",
"jump",
"jungle",
"junior",
"junk",
"just",
"kangaroo",
"keen",
"keep",
"ketchup",
"key",
"kick",
"kid",
"kidney",
"kind",
"kingdom",
"kiss",
"kit",
"kitchen",
"kite",
"kitten",
"kiwi",
"knee",
"knife",
"knock",
"know",
"lab",
"label",
"labor",
"ladder",
"lady",
"lake",
"lamp",
"language",
"laptop",
"large",
"later",
"latin",
"laugh",
"laundry",
"lava",
"law",
"lawn",
"lawsuit",
"layer",
"lazy",
"leader",
"leaf",
"learn",
"leave",
"lecture",
"left",
"leg",
"legal",
"legend",
"leisure",
"lemon",
"lend",
"length",
"lens",
"leopard",
"lesson",
"letter",
"level",
"liar",
"liberty",
"library",
"license",
"life",
"lift",
"light",
"like",
"limb",
"limit",
"link",
"lion",
"liquid",
"list",
"little",
"live",
"lizard",
"load",
"loan",
"lobster",
"local",
"lock",
"logic",
"lonely",
"long",
"loop",
"lottery",
"loud",
"lounge",
"love",
"loyal",
"lucky",
"luggage",
"lumber",
"lunar",
"lunch",
"luxury",
"lyrics",
"machine",
"mad",
"magic",
"magnet",
"maid",
"mail",
"main",
"major",
"make",
"mammal",
"man",
"manage",
"mandate",
"mango",
"mansion",
"manual",
"maple",
"marble",
"march",
"margin",
"marine",
"market",
"marriage",
"mask",
"mass",
"master",
"match",
"material",
"math",
"matrix",
"matter",
"maximum",
"maze",
"meadow",
"mean",
"measure",
"meat",
"mechanic",
"medal",
"media",
"melody",
"melt",
"member",
"memory",
"mention",
"menu",
"mercy",
"merge",
"merit",
"merry",
"mesh",
"message",
"metal",
"method",
"middle",
"midnight",
"milk",
"million",
"mimic",
"mind",
"minimum",
"minor",
"minute",
"miracle",
"mirror",
"misery",
"miss",
"mistake",
"mix",
"mixed",
"mixture",
"mobile",
"model",
"modify",
"mom",
"moment",
"monitor",
"monkey",
"monster",
"month",
"moon",
"moral",
"more",
"morning",
"mosquito",
"mother",
"motion",
"motor",
"mountain",
"mouse",
"move",
"movie",
"much",
"muffin",
"mule",
"multiply",
"muscle",
"museum",
"mushroom",
"music",
"must",
"mutual",
"myself",
"mystery",
"myth",
"naive",
"name",
"napkin",
"narrow",
"nasty",
"nation",
"nature",
"near",
"neck",
"need",
"negative",
"neglect",
"neither",
"nephew",
"nerve",
"nest",
"net",
"network",
"neutral",
"never",
"news",
"next",
"nice",
"night",
"noble",
"noise",
"nominee",
"noodle",
"normal",
"north",
"nose",
"notable",
"note",
"nothing",
"notice",
"novel",
"now",
"nuclear",
"number",
"nurse",
"nut",
"oak",
"obey",
"object",
"oblige",
"obscure",
"observe",
"obtain",
"obvious",
"occur",
"ocean",
"october",
"odor",
"off",
"offer",
"office",
"often",
"oil",
"okay",
"old",
"olive",
"olympic",
"omit",
"once",
"one",
"onion",
"online",
"only",
"open",
"opera",
"opinion",
"oppose",
"option",
"orange",
"orbit",
"orchard",
"order",
"ordinary",
"organ",
"orient",
"original",
"orphan",
"ostrich",
"other",
"outdoor",
"outer",
"output",
"outside",
"oval",
"oven",
"over",
"own",
"owner",
"oxygen",
"oyster",
"ozone",
"pact",
"paddle",
"page",
"pair",
"palace",
"palm",
"panda",
"panel",
"panic",
"panther",
"paper",
"parade",
"parent",
"park",
"parrot",
"party",
"pass",
"patch",
"path",
"patient",
"patrol",
"pattern",
"pause",
"pave",
"payment",
"peace",
"peanut",
"pear",
"peasant",
"pelican",
"pen",
"penalty",
"pencil",
"people",
"pepper",
"perfect",
"permit",
"person",
"pet",
"phone",
"photo",
"phrase",
"physical",
"piano",
"picnic",
"picture",
"piece",
"pig",
"pigeon",
"pill",
"pilot",
"pink",
"pioneer",
"pipe",
"pistol",
"pitch",
"pizza",
"place",
"planet",
"plastic",
"plate",
"play",
"please",
"pledge",
"pluck",
"plug",
"plunge",
"poem",
"poet",
"point",
"polar",
"pole",
"police",
"pond",
"pony",
"pool",
"popular",
"portion",
"position",
"possible",
"post",
"potato",
"pottery",
"poverty",
"powder",
"power",
"practice",
"praise",
"predict",
"prefer",
"prepare",
"present",
"pretty",
"prevent",
"price",
"pride",
"primary",
"print",
"priority",
"prison",
"private",
"prize",
"problem",
"process",
"produce",
"profit",
"program",
"project",
"promote",
"proof",
"property",
"prosper",
"protect",
"proud",
"provide",
"public",
"pudding",
"pull",
"pulp",
"pulse",
"pumpkin",
"punch",
"pupil",
"puppy",
"purchase",
"purity",
"purpose",
"purse",
"push",
"put",
"puzzle",
"pyramid",
"quality",
"quantum",
"quarter",
"question",
"quick",
"quit",
"quiz",
"quote",
"rabbit",
"raccoon",
"race",
"rack",
"radar",
"radio",
"rail",
"rain",
"raise",
"rally",
"ramp",
"ranch",
"random",
"range",
"rapid",
"rare",
"rate",
"rather",
"raven",
"raw",
"razor",
"ready",
"real",
"reason",
"rebel",
"rebuild",
"recall",
"receive",
"recipe",
"record",
"recycle",
"reduce",
"reflect",
"reform",
"refuse",
"region",
"regret",
"regular",
"reject",
"relax",
"release",
"relief",
"rely",
"remain",
"remember",
"remind",
"remove",
"render",
"renew",
"rent",
"reopen",
"repair",
"repeat",
"replace",
"report",
"require",
"rescue",
"resemble",
"resist",
"resource",
"response",
"result",
"retire",
"retreat",
"return",
"reunion",
"reveal",
"review",
"reward",
"rhythm",
"rib",
"ribbon",
"rice",
"rich",
"ride",
"ridge",
"rifle",
"right",
"rigid",
"ring",
"riot",
"ripple",
"risk",
"ritual",
"rival",
"river",
"road",
"roast",
"robot",
"robust",
"rocket",
"romance",
"roof",
"rookie",
"room",
"rose",
"rotate",
"rough",
"round",
"route",
"royal",
"rubber",
"rude",
"rug",
"rule",
"run",
"runway",
"rural",
"sad",
"saddle",
"sadness",
"safe",
"sail",
"salad",
"salmon",
"salon",
"salt",
"salute",
"same",
"sample",
"sand",
"satisfy",
"satoshi",
"sauce",
"sausage",
"save",
"say",
"scale",
"scan",
"scare",
"scatter",
"scene",
"scheme",
"school",
"science",
"scissors",
"scorpion",
"scout",
"scrap",
"screen",
"script",
"scrub",
"sea",
"search",
"season",
"seat",
"second",
"secret",
"section",
"security",
"seed",
"seek",
"segment",
"select",
"sell",
"seminar",
"senior",
"sense",
"sentence",
"series",
"service",
"session",
"settle",
"setup",
"seven",
"shadow",
"shaft",
"shallow",
"share",
"shed",
"shell",
"sheriff",
"shield",
"shift",
"shine",
"ship",
"shiver",
"shock",
"shoe",
"shoot",
"shop",
"short",
"shoulder",
"shove",
"shrimp",
"shrug",
"shuffle",
"shy",
"sibling",
"sick",
"side",
"siege",
"sight",
"sign",
"silent",
"silk",
"silly",
"silver",
"similar",
"simple",
"since",
"sing",
"siren",
"sister",
"situate",
"six",
"size",
"skate",
"sketch",
"ski",
"skill",
"skin",
"skirt",
"skull",
"slab",
"slam",
"sleep",
"slender",
"slice",
"slide",
"slight",
"slim",
"slogan",
"slot",
"slow",
"slush",
"small",
"smart",
"smile",
"smoke",
"smooth",
"snack",
"snake",
"snap",
"sniff",
"snow",
"soap",
"soccer",
"social",
"sock",
"soda",
"soft",
"solar",
"soldier",
"solid",
"solution",
"solve",
"someone",
"song",
"soon",
"sorry",
"sort",
"soul",
"sound",
"soup",
"source",
"south",
"space",
"spare",
"spatial",
"spawn",
"speak",
"special",
"speed",
"spell",
"spend",
"sphere",
"spice",
"spider",
"spike",
"spin",
"spirit",
"split",
"spoil",
"sponsor",
"spoon",
"sport",
"spot",
"spray",
"spread",
"spring",
"spy",
"square",
"squeeze",
"squirrel",
"stable",
"stadium",
"staff",
"stage",
"stairs",
"stamp",
"stand",
"start",
"state",
"stay",
"steak",
"steel",
"stem",
"step",
"stereo",
"stick",
"still",
"sting",
"stock",
"stomach",
"stone",
"stool",
"story",
"stove",
"strategy",
"street",
"strike",
"strong",
"struggle",
"student",
"stuff",
"stumble",
"style",
"subject",
"submit",
"subway",
"success",
"such",
"sudden",
"suffer",
"sugar",
"suggest",
"suit",
"summer",
"sun",
"sunny",
"sunset",
"super",
"supply",
"supreme",
"sure",
"surface",
"surge",
"surprise",
"surround",
"survey",
"suspect",
"sustain",
"swallow",
"swamp",
"swap",
"swarm",
"swear",
"sweet",
"swift",
"swim",
"swing",
"switch",
"sword",
"symbol",
"symptom",
"syrup",
"system",
"table",
"tackle",
"tag",
"tail",
"talent",
"talk",
"tank",
"tape",
"target",
"task",
"taste",
"tattoo",
"taxi",
"teach",
"team",
"tell",
"ten",
"tenant",
"tennis",
"tent",
"term",
"test",
"text",
"thank",
"that",
"theme",
"then",
"theory",
"there",
"they",
"thing",
"this",
"thought",
"three",
"thrive",
"throw",
"thumb",
"thunder",
"ticket",
"tide",
"tiger",
"tilt",
"timber",
"time",
"tiny",
"tip",
"tired",
"tissue",
"title",
"toast",
"tobacco",
"today",
"toddler",
"toe",
"together",
"toilet",
"token",
"tomato",
"tomorrow",
"tone",
"tongue",
"tonight",
"tool",
"tooth",
"top",
"topic",
"topple",
"torch",
"tornado",
"tortoise",
"toss",
"total",
"tourist",
"toward",
"tower",
"town",
"toy",
"track",
"trade",
"traffic",
"tragic",
"train",
"transfer",
"trap",
"trash",
"travel",
"tray",
"treat",
"tree",
"trend",
"trial",
"tribe",
"trick",
"trigger",
"trim",
"trip",
"trophy",
"trouble",
"truck",
"true",
"truly",
"trumpet",
"trust",
"truth",
"try",
"tube",
"tuition",
"tumble",
"tuna",
"tunnel",
"turkey",
"turn",
"turtle",
"twelve",
"twenty",
"twice",
"twin",
"twist",
"two",
"type",
"typical",
"ugly",
"umbrella",
"unable",
"unaware",
"uncle",
"uncover",
"under",
"undo",
"unfair",
"unfold",
"unhappy",
"uniform",
"unique",
"unit",
"universe",
"unknown",
"unlock",
"until",
"unusual",
"unveil",
"update",
"upgrade",
"uphold",
"upon",
"upper",
"upset",
"urban",
"urge",
"usage",
"use",
"used",
"useful",
"useless",
"usual",
"utility",
"vacant",
"vacuum",
"vague",
"valid",
"valley",
"valve",
"van",
"vanish",
"vapor",
"various",
"vast",
"vault",
"vehicle",
"velvet",
"vendor",
"venture",
"venue",
"verb",
"verify",
"version",
"very",
"vessel",
"veteran",
"viable",
"vibrant",
"vicious",
"victory",
"video",
"view",
"village",
"vintage",
"violin",
"virtual",
"virus",
"visa",
"visit",
"visual",
"vital",
"vivid",
"vocal",
"voice",
"void",
"volcano",
"volume",
"vote",
"voyage",
"wage",
"wagon",
"wait",
"walk",
"wall",
"walnut",
"want",
"warfare",
"warm",
"warrior",
"wash",
"wasp",
"waste",
"water",
"wave",
"way",
"wealth",
"weapon",
"wear",
"weasel",
"weather",
"web",
"wedding",
"weekend",
"weird",
"welcome",
"west",
"wet",
"whale",
"what",
"wheat",
"wheel",
"when",
"where",
"whip",
"whisper",
"wide",
"width",
"wife",
"wild",
"will",
"win",
"window",
"wine",
"wing",
"wink",
"winner",
"winter",
"wire",
"wisdom",
"wise",
"wish",
"witness",
"wolf",
"woman",
"wonder",
"wood",
"wool",
"word",
"work",
"world",
"worry",
"worth",
"wrap",
"wreck",
"wrestle",
"wrist",
"write",
"wrong",
"yard",
"year",
"yellow",
"you",
"young",
"youth",
"zebra",
"zero",
"zone",
"zoo",
]
|
lk-geimfari/elizabeth
|
mimesis/data/int/cryptographic.py
|
Python
|
mit
| 27,537
|
[
"BLAST",
"CASINO",
"CRYSTAL",
"Galaxy",
"Jaguar",
"VisIt"
] |
8f635f808f3b62ac996b773801434dad94b6386c71274158595265cc811a4c78
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import BaseHTTPServer
import ConfigParser
import cookielib
import errno
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import webbrowser
from multiprocessing.pool import ThreadPool
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
LOGGER = logging.getLogger('upload')
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
VCS = [
{'name': VCS_MERCURIAL,
'aliases': ['hg', 'mercurial']},
{'name': VCS_SUBVERSION,
'aliases': ['svn', 'subversion'],},
{'name': VCS_PERFORCE,
'aliases': ['p4', 'perforce']},
{'name': VCS_GIT,
'aliases': ['git']},
{'name': VCS_CVS,
'aliases': ['cvs']},
]
VCS_SHORT_NAMES = [] # hg, svn, ...
VCS_ABBREVIATIONS = {} # alias: name, ...
for vcs in VCS:
VCS_SHORT_NAMES.append(min(vcs['aliases'], key=len))
VCS_ABBREVIATIONS.update((alias, vcs['name']) for alias in vcs['aliases'])
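Illustration only, not in the original script: the two lookup tables built by the loop above collect the shortest alias of each system and map every alias back to its canonical name.
# The expected values follow directly from the VCS list defined above.
assert VCS_SHORT_NAMES == ["hg", "svn", "p4", "git", "cvs"]
assert VCS_ABBREVIATIONS["mercurial"] == VCS_MERCURIAL
assert VCS_ABBREVIATIONS["svn"] == VCS_SUBVERSION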
# OAuth 2.0-Related Constants
LOCALHOST_IP = '127.0.0.1'
DEFAULT_OAUTH2_PORT = 8001
ACCESS_TOKEN_PARAM = 'access_token'
ERROR_PARAM = 'error'
OAUTH_DEFAULT_ERROR_MESSAGE = 'OAuth 2.0 error occurred.'
OAUTH_PATH = '/get-access-token'
OAUTH_PATH_PORT_TEMPLATE = OAUTH_PATH + '?port=%(port)d'
AUTH_HANDLER_RESPONSE = """\
<html>
<head>
<title>Authentication Status</title>
<script>
window.onload = function() {
window.close();
}
</script>
</head>
<body>
<p>The authentication flow has completed.</p>
</body>
</html>
"""
# Borrowed from google-api-python-client
OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Your browser has been opened to visit:
%s
If your browser is on a different machine then exit and re-run
upload.py with the command-line parameter
--no_oauth2_webbrowser
"""
NO_OPEN_LOCAL_MESSAGE_TEMPLATE = """\
Go to the following link in your browser:
%s
and copy the access token.
"""
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >> sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args["Error"]
self.info = args.get("Info", None)
@property
def reason(self):
# reason is a property on python 2.7 but a member variable on <=2.6.
# self.args is modified so it cannot be used as-is so save the value in
# self._reason.
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None,
extra_headers=None, save_cookies=False,
account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new AbstractRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
LOGGER.info("Server: %s; Host: %s", self.host, self.host_override)
else:
LOGGER.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
LOGGER.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data, headers={"Accept": "text/plain"})
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
print >> sys.stderr, ''
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >> sys.stderr, (
"Use an application-specific password instead "
"of your regular account password.\n"
"See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
else:
print >> sys.stderr, "Invalid username or password."
elif e.reason == "CaptchaRequired":
print >> sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
elif e.reason == "NotVerified":
print >> sys.stderr, "Account not verified."
elif e.reason == "TermsNotAgreed":
print >> sys.stderr, "User has not agreed to TOS."
elif e.reason == "AccountDeleted":
print >> sys.stderr, "The user account has been deleted."
elif e.reason == "AccountDisabled":
print >> sys.stderr, "The user account has been disabled."
break
elif e.reason == "ServiceDisabled":
print >> sys.stderr, ("The user's access to the service has been "
"disabled.")
elif e.reason == "ServiceUnavailable":
print >> sys.stderr, "The service is not available; try again later."
else:
# Unknown error.
raise
print >> sys.stderr, ''
continue
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated and self.auth_function:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req, timeout=70)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
if not self.auth_function:
raise
self._Authenticate()
elif e.code == 301:
# Handle permanent redirect manually.
url = e.info()["location"]
url_loc = urlparse.urlparse(url)
self.host = '%s://%s' % (url_loc[0], url_loc[1])
elif e.code >= 500:
# TODO: We should error out on a 500, but the server is too flaky
# for that at the moment.
StatusUpdate('Upload got a 500 response: %d' % e.code)
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if isinstance(self.auth_function, OAuth2Creds):
access_token = self.auth_function()
if access_token is not None:
self.extra_headers['Authorization'] = 'OAuth %s' % (access_token,)
self.authenticated = True
else:
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class CondensedHelpFormatter(optparse.IndentedHelpFormatter):
"""Frees more horizontal space by removing indentation from group
options and collapsing arguments between short and long, e.g.
'-o ARG, --opt=ARG' to -o --opt ARG"""
def format_heading(self, heading):
return "%s:\n" % heading
def format_option(self, option):
self.dedent()
res = optparse.HelpFormatter.format_option(self, option)
self.indent()
return res
def format_option_strings(self, option):
self.set_long_opt_delimiter(" ")
optstr = optparse.HelpFormatter.format_option_strings(self, option)
optlist = optstr.split(", ")
if len(optlist) > 1:
if option.takes_value():
# strip METAVAR from all but the last option
optlist = [x.split()[0] for x in optlist[:-1]] + optlist[-1:]
optstr = " ".join(optlist)
return optstr
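A hedged sketch, not in the original file, of the collapsing this formatter performs; the option name is purely illustrative.
_demo_formatter = CondensedHelpFormatter()
_demo_option = optparse.make_option("-o", "--opt", metavar="ARG", help="demo")
# The stock formatter would render "-o ARG, --opt=ARG"; this one prints "-o --opt ARG".
print(_demo_formatter.format_option_strings(_demo_option))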
parser = optparse.OptionParser(
usage=("%prog [options] [-- diff_options] [path...]\n"
"See also: http://code.google.com/p/rietveld/wiki/UploadPyUsage"),
add_help_option=False,
formatter=CondensedHelpFormatter()
)
parser.add_option("-h", "--help", action="store_true",
help="Show this help message and exit.")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--oauth2", action="store_true",
dest="use_oauth2", default=False,
help="Use OAuth 2.0 instead of a password.")
group.add_option("--oauth2_port", action="store", type="int",
dest="oauth2_port", default=DEFAULT_OAUTH2_PORT,
help=("Port to use to handle OAuth 2.0 redirect. Must be an "
"integer in the range 1024-49151, defaults to "
"'%default'."))
group.add_option("--no_oauth2_webbrowser", action="store_false",
dest="open_oauth2_local_webbrowser", default=True,
help="Don't open a browser window to get an access token.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
group.add_option("-j", "--number-parallel-uploads",
dest="num_upload_threads", default=8,
help="Number of uploads to do in parallel.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-t", "--title", action="store", dest="title",
help="New issue subject or new patch set title")
group.add_option("--project", action="store", dest="project",
help="The project the issue belongs to")
group.add_option("-m", "--message", action="store", dest="message",
default=None,
help="New issue description or new patch set message")
group.add_option("-F", "--file", action="store", dest="file",
default=None, help="Read the message above from file.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base URL path for files (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--target_ref", action="store", dest="target_ref",
default=None,
help="The target ref that is transitively tracked by the "
"local branch this patch comes from.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("-p", "--send_patch", action="store_true",
dest="send_patch", default=False,
help="Same as --send_mail, but include diff as an "
"attachment, and prepend email subject with 'PATCH:'.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Explicitly specify version control system (%s)"
% ", ".join(VCS_SHORT_NAMES)))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
# Git-specific
group = parser.add_option_group("Git-specific options")
group.add_option("--git_similarity", action="store", dest="git_similarity",
metavar="SIM", type="int", default=50,
help=("Set the minimum similarity percentage for detecting "
"renames and copies. See `git diff -C`. (default 50)."))
group.add_option("--git_only_search_patch", action="store_false", default=True,
dest='git_find_copies_harder',
help="Removes --find-copies-harder when seaching for copies")
group.add_option("--git_no_find_copies", action="store_false", default=True,
dest="git_find_copies",
help=("Prevents git from looking for copies (default off)."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
"(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
metavar="P4_PORT", default=None,
help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
metavar="P4_CHANGELIST", default=None,
help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
metavar="P4_CLIENT", default=None,
help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
metavar="P4_USER", default=None,
help=("Perforce user"))
# OAuth 2.0 Methods and Helpers
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters for an access token
or an error and then stops serving.
"""
access_token = None
error = None
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for redirects back to localhost from the associated server.
Waits for a single request and parses the query parameters into the server's
access_token or error and then stops serving.
"""
def SetResponseValue(self):
"""Stores the access token or error from the request on the server.
Will only do this if exactly one query parameter was passed in to the
request and that query parameter used 'access_token' or 'error' as the key.
"""
query_string = urlparse.urlparse(self.path).query
query_params = urlparse.parse_qs(query_string)
if len(query_params) == 1:
if query_params.has_key(ACCESS_TOKEN_PARAM):
access_token_list = query_params[ACCESS_TOKEN_PARAM]
if len(access_token_list) == 1:
self.server.access_token = access_token_list[0]
else:
error_list = query_params.get(ERROR_PARAM, [])
if len(error_list) == 1:
self.server.error = error_list[0]
def do_GET(self):
"""Handle a GET request.
Parses and saves the query parameters and prints a message that the server
has completed its lone task (handling a redirect).
Note that we can't detect if an error occurred.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.SetResponseValue()
self.wfile.write(AUTH_HANDLER_RESPONSE)
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
pass
def OpenOAuth2ConsentPage(server=DEFAULT_REVIEW_SERVER,
port=DEFAULT_OAUTH2_PORT):
"""Opens the OAuth 2.0 consent page or prints instructions how to.
Uses the webbrowser module to open the OAuth server side page in a browser.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
A boolean indicating whether the page opened successfully.
"""
path = OAUTH_PATH_PORT_TEMPLATE % {'port': port}
parsed_url = urlparse.urlparse(server)
scheme = parsed_url[0] or 'https'
if scheme != 'https':
ErrorExit('Using OAuth requires a review server with SSL enabled.')
# If no scheme was given on command line the server address ends up in
# parsed_url.path otherwise in netloc.
host = parsed_url[1] or parsed_url[2]
page = '%s://%s%s' % (scheme, host, path)
page_opened = webbrowser.open(page, new=1, autoraise=True)
if page_opened:
print OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
return page_opened
def WaitForAccessToken(port=DEFAULT_OAUTH2_PORT):
"""Spins up a simple HTTP Server to handle a single request.
Intended to handle a single redirect from the production server after the
user authenticated via OAuth 2.0 with the server.
Args:
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
Returns:
The access token passed to the localhost server, or None if no access token
was passed.
"""
httpd = ClientRedirectServer((LOCALHOST_IP, port), ClientRedirectHandler)
# Wait to serve just one request before deferring control back
# to the caller of WaitForAccessToken.
httpd.handle_request()
if httpd.access_token is None:
ErrorExit(httpd.error or OAUTH_DEFAULT_ERROR_MESSAGE)
return httpd.access_token
def GetAccessToken(server=DEFAULT_REVIEW_SERVER, port=DEFAULT_OAUTH2_PORT,
open_local_webbrowser=True):
"""Gets an Access Token for the current user.
Args:
server: String containing the review server URL. Defaults to
DEFAULT_REVIEW_SERVER.
port: Integer, the port where the localhost server receiving the redirect
is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_local_webbrowser: Boolean, defaults to True. If set, opens a page in
the user's browser.
Returns:
A string access token that was sent to the local server. If the serving page
via WaitForAccessToken does not receive an access token, this method
returns None.
"""
access_token = None
if open_local_webbrowser:
page_opened = OpenOAuth2ConsentPage(server=server, port=port)
if page_opened:
try:
access_token = WaitForAccessToken(port=port)
except socket.error, e:
print 'Can\'t start local webserver. Socket Error: %s\n' % (e.strerror,)
if access_token is None:
# TODO(dhermes): Offer to add to clipboard using xsel, xclip, pbcopy, etc.
page = 'https://%s%s' % (server, OAUTH_PATH)
print NO_OPEN_LOCAL_MESSAGE_TEMPLATE % (page,)
access_token = raw_input('Enter access token: ').strip()
return access_token
class KeyringCreds(object):
def __init__(self, server, host, email):
self.server = server
# Explicitly cast host to str to work around bug in old versions of Keyring
# (versions before 0.10). Even though newer versions of Keyring fix this,
# some modern linuxes (such as Ubuntu 12.04) still bundle a version with
# the bug.
self.host = str(host)
self.email = email
self.accounts_seen = set()
def GetUserCredentials(self):
"""Prompts the user for a username and password.
Only use keyring on the initial call. If the keyring contains the wrong
password, we want to give the user a chance to enter another one.
"""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
global keyring
email = self.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % self.server)
password = None
if keyring and not email in self.accounts_seen:
try:
password = keyring.get_password(self.host, email)
except:
# Sadly, we have to trap all errors here as
# gnomekeyring.IOError inherits from object. :/
print "Failed to get password from keyring"
keyring = None
if password is not None:
print "Using password from system keyring."
self.accounts_seen.add(email)
else:
password = getpass.getpass("Password for %s: " % email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(self.host, email, password)
self.accounts_seen.add(email)
return (email, password)
class OAuth2Creds(object):
"""Simple object to hold server and port to be passed to GetAccessToken."""
def __init__(self, server, port, open_local_webbrowser=True):
self.server = server
self.port = port
self.open_local_webbrowser = open_local_webbrowser
def __call__(self):
"""Uses stored server and port to retrieve OAuth 2.0 access token."""
return GetAccessToken(server=self.server, port=self.port,
open_local_webbrowser=self.open_local_webbrowser)
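# Illustrative sketch (never called in this module): an OAuth2Creds instance is
# a zero-argument callable, so GetRpcServer() below can pass it to HttpRpcServer
# as the credential callback and the access token is only fetched when
# authentication is actually needed. The server name is hypothetical.
def _ExampleOAuth2Creds():
  creds = OAuth2Creds("codereview.example.com", DEFAULT_OAUTH2_PORT,
                      open_local_webbrowser=False)
  return creds  # calling creds() would fetch or prompt for an access token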
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE, use_oauth2=False,
oauth2_port=DEFAULT_OAUTH2_PORT,
open_oauth2_local_webbrowser=True):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
use_oauth2: Boolean indicating whether OAuth 2.0 should be used for
authentication.
oauth2_port: Integer, the port where the localhost server receiving the
redirect is serving. Defaults to DEFAULT_OAUTH2_PORT.
open_oauth2_local_webbrowser: Boolean, defaults to True. If True and using
OAuth, this opens a page in the user's browser to obtain a token.
Returns:
A new HttpRpcServer, on which RPC calls can be made.
"""
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "[email protected]"
LOGGER.info("Using debug user %s. Override with --email" % email)
server = HttpRpcServer(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
positional_args = [server]
if use_oauth2:
positional_args.append(
OAuth2Creds(server, oauth2_port, open_oauth2_local_webbrowser))
else:
positional_args.append(KeyringCreds(server, host, email).GetUserCredentials)
return HttpRpcServer(*positional_args,
host_override=host_override,
save_cookies=save_cookies,
account_type=account_type)
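# Illustrative sketch (never called in this module): a hypothetical invocation
# of GetRpcServer() for an OAuth 2.0 login against an assumed review server
# name; RealMain() builds the equivalent arguments from the parsed command
# line options.
def _ExampleGetRpcServer():
  return GetRpcServer("codereview.example.com",
                      email="user@example.com",
                      use_oauth2=True,
                      open_oauth2_local_webbrowser=False)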
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-%s-' % sum(hash(f) for f in files)
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
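# Illustrative sketch of the encoder above with hypothetical field and file
# values: ctype carries the generated boundary and body is the payload to post
# with Content-Type set to ctype.
def _ExampleEncodeMultipartFormData():
  fields = [("subject", "demo change")]              # hypothetical form field
  files = [("data", "data.diff", "Index: foo.c\n")]  # hypothetical diff payload
  ctype, body = EncodeMultipartFormData(fields, files)
  return ctype, body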
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCodeAndStderr(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Run a command and return output from stdout, stderr and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (stdout, stderr, return code)
"""
LOGGER.info("Running %s", command)
env = env.copy()
env['LC_MESSAGES'] = 'C'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >> sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, errout, p.returncode
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Run a command and return output from stdout and the return code."""
out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
universal_newlines, env)
return out, retcode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
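# Illustrative sketch of the three shell helpers above, using an example
# command (assumes a git binary is on PATH): RunShell() exits via ErrorExit()
# on a non-zero status, while the two lower-level variants hand the return
# code (and stderr) back to the caller.
def _ExampleRunShellHelpers():
  out, err, code = RunShellWithReturnCodeAndStderr(["git", "--version"])
  out, code = RunShellWithReturnCode(["git", "--version"])
  out = RunShell(["git", "--version"], silent_ok=True)
  return out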
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GetGUID(self):
"""Return string to distinguish the repository from others, for example to
query all opened review issues for it"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
result = ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
elif options.verbose:
result = "Uploading %s file for %s" % (type, filename)
checksum = md5(content).hexdigest()
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload file for %s. Got %d status code." %
(filename, e.code))
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return result
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, base_content, is_binary, status, True))
threads.append(t)
if new_content != None:
t = thread_pool.apply_async(UploadFile, args=(filename,
file_id, new_content, is_binary, status, False))
threads.append(t)
for t in threads:
print t.get(timeout=60)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return (mimetype.startswith("image/") and
not mimetype.startswith("image/svg"))
def IsBinaryData(self, data):
"""Returns true if data contains a null byte."""
    # Derived from Mercurial's heuristic; see
# http://selenic.com/hg/file/848a6658069e/mercurial/util.py#l229
return bool(data and "\0" in data)
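# Illustrative sketch of the abstract interface above: a minimal, hypothetical
# backend only has to provide GetGUID, GenerateDiff, GetUnknownFiles and
# GetBaseFile; diff post-processing, base-file upload and binary detection are
# inherited. Instantiation takes the parsed command line options, as with the
# real backends below. This class is never used in this module.
class _ExampleVCS(VersionControlSystem):
  def GetGUID(self):
    return "example-guid"           # hypothetical repository identifier
  def GenerateDiff(self, args):
    return "Index: example.txt\n"   # hypothetical svn-style diff text
  def GetUnknownFiles(self):
    return []
  def GetBaseFile(self, filename):
    # (base_content, new_content, is_binary, status)
    return "", None, False, "M"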
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GetGUID(self):
return self._GetInfo("Repository UUID")
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns base URL for current diff.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
url = self._GetInfo("URL")
if url:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
guess = ""
# TODO(anatoli) - repository specific hacks should be handled by server
if netloc == "svn.python.org" and scheme == "svn+ssh":
path = "projects" + path
scheme = "http"
guess = "Python "
elif netloc.endswith(".googlecode.com"):
scheme = "http"
guess = "Google Code "
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
LOGGER.info("Guessed %sbase = %s", guess, base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def _GetInfo(self, key):
"""Parses 'svn info' for current dir. Returns value for key or None"""
for line in RunShell(["svn", "info"]).splitlines():
if line.startswith(key + ": "):
return line.split(":", 1)[1].strip()
def _EscapeFilename(self, filename):
"""Escapes filename for SVN commands."""
if "@" in filename and not filename.endswith("@"):
filename = "%s@" % filename
return filename
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
LOGGER.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals",
self._EscapeFilename(filename)])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start,
self._EscapeFilename(dirname) or "."]
out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
if returncode:
          # Directory might not yet exist at start revision.
# svn: Unable to find repository location for 'abc' in revision nnn
if re.match('^svn: Unable to find repository location '
'for .+ in revision \d+', err):
old_files = ()
else:
ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
else:
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [self._EscapeFilename(dirname) or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type",
self._EscapeFilename(filename)], silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary:
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
        # filename must not be escaped. We already append an "@REV" suffix here.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
else:
mimetype = mimetype.strip()
get_base = False
# this test for binary is exactly the test prescribed by the
# official SVN docs at
# http://subversion.apache.org/faq.html#binary-files
is_binary = (bool(mimetype) and
not mimetype.startswith("text/") and
mimetype not in ("image/x-xbitmap", "image/x-xpixmap"))
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", self._EscapeFilename(filename)],
universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'" % filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def GetGUID(self):
revlist = RunShell("git rev-list --parents HEAD".split()).splitlines()
# M-A: Return the 1st root hash, there could be multiple when a
# subtree is merged. In that case, more analysis would need to
# be done to figure out which HEAD is the 'most representative'.
for r in revlist:
if ' ' not in r:
return r
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
    # Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
if ":" in self.options.revision:
extra_args = self.options.revision.split(":", 1) + extra_args
else:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if "GIT_EXTERNAL_DIFF" in env:
del env["GIT_EXTERNAL_DIFF"]
# -M/-C will not print the diff for the deleted file when a file is renamed.
# This is confusing because the original file will not be shown on the
# review when a file is renamed. So, get a diff with ONLY deletes, then
# append a diff (with rename detection), without deletes.
cmd = [
"git", "diff", "--no-color", "--no-ext-diff", "--full-index",
"--ignore-submodules", "--src-prefix=a/", "--dst-prefix=b/",
]
diff = RunShell(
cmd + ["--no-renames", "--diff-filter=D"] + extra_args,
env=env, silent_ok=True)
assert 0 <= self.options.git_similarity <= 100
if self.options.git_find_copies:
similarity_options = ["-l100000", "-C%d%%" % self.options.git_similarity]
if self.options.git_find_copies_harder:
similarity_options.append("--find-copies-harder")
else:
similarity_options = ["-M%d%%" % self.options.git_similarity ]
diff += RunShell(
cmd + ["--diff-filter=AMCRT"] + similarity_options + extra_args,
env=env, silent_ok=True)
    # The CL could consist solely of file deletions (or contain none at all),
    # so accept a silent (empty) diff from both commands and then check for an
    # empty combined diff manually.
if not diff:
ErrorExit("No output from %s" % (cmd + extra_args))
return diff
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=False)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(
["git", "show", "HEAD:" + filename], silent_ok=True,
universal_newlines=False)
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
# Grab the before/after content if we need it.
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before)
is_binary = self.IsImage(filename)
if base_content:
is_binary = is_binary or self.IsBinaryData(base_content)
    # Only include the "after" file if it's an image; otherwise it is
    # reconstructed from the diff.
if hash_after:
new_content = self.GetFileContent(hash_after)
is_binary = is_binary or self.IsBinaryData(new_content)
if not is_binary:
new_content = None
return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for CVS."""
def __init__(self, options):
super(CVSVCS, self).__init__(options)
def GetGUID(self):
"""For now we don't know how to get repository ID for CVS"""
return
def GetOriginalContent_(self, filename):
RunShell(["cvs", "up", filename], silent_ok=True)
    # TODO: need to detect file content encoding
content = open(filename).read()
return content.replace("\r\n", "\n")
def GetBaseFile(self, filename):
base_content = None
new_content = None
status = "A"
output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
if retcode:
ErrorExit("Got error status from 'cvs status %s'" % filename)
if output.find("Status: Locally Modified") != -1:
status = "M"
temp_filename = "%s.tmp123" % filename
os.rename(filename, temp_filename)
base_content = self.GetOriginalContent_(filename)
os.rename(temp_filename, filename)
elif output.find("Status: Locally Added"):
status = "A"
base_content = ""
elif output.find("Status: Needs Checkout"):
status = "D"
base_content = self.GetOriginalContent_(filename)
return (base_content, new_content, self.IsBinaryData(base_content), status)
def GenerateDiff(self, extra_args):
cmd = ["cvs", "diff", "-u", "-N"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(extra_args)
data, retcode = RunShellWithReturnCode(cmd)
count = 0
if retcode in [0, 1]:
for line in data.splitlines():
if line.startswith("Index:"):
count += 1
LOGGER.info(line)
if not count:
ErrorExit("No valid patches found in output from cvs diff")
return data
def GetUnknownFiles(self):
data, retcode = RunShellWithReturnCode(["cvs", "diff"])
if retcode not in [0, 1]:
ErrorExit("Got error status from 'cvs diff':\n%s" % (data,))
unknown_files = []
for line in data.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def GetGUID(self):
# See chapter "Uniquely identifying a repository"
# http://hgbook.red-bean.com/read/customizing-the-output-of-mercurial.html
info = RunShell("hg log -r0 --template {node}".split())
return info.strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
absname = os.path.join(self.repo_dir, filename)
return os.path.relpath(absname)
def GenerateDiff(self, extra_args):
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
        # Modify the line so it looks as if it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
LOGGER.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir,
# but "hg diff" has given us the path relative to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
status, _ = out[0].split(' ', 1)
if len(out) > 1 and status == "A":
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = self.IsBinaryData(base_content)
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or self.IsBinaryData(new_content)
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary:
new_content = None
return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Perforce."""
def __init__(self, options):
def ConfirmLogin():
# Make sure we have a valid perforce session
while True:
data, retcode = self.RunPerforceCommandWithReturnCode(
["login", "-s"], marshal_output=True)
if not data:
ErrorExit("Error checking perforce login")
if not retcode and (not "code" in data or data["code"] != "error"):
break
print "Enter perforce password: "
self.RunPerforceCommandWithReturnCode(["login"])
super(PerforceVCS, self).__init__(options)
self.p4_changelist = options.p4_changelist
if not self.p4_changelist:
ErrorExit("A changelist id is required")
if (options.revision):
ErrorExit("--rev is not supported for perforce")
self.p4_port = options.p4_port
self.p4_client = options.p4_client
self.p4_user = options.p4_user
ConfirmLogin()
if not options.title:
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
if description and "desc" in description:
# Rietveld doesn't support multi-line descriptions
raw_title = description["desc"].strip()
lines = raw_title.splitlines()
if len(lines):
options.title = lines[0]
def GetGUID(self):
"""For now we don't know how to get repository ID for Perforce"""
return
def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
universal_newlines=True):
args = ["p4"]
if marshal_output:
# -G makes perforce format its output as marshalled python objects
args.extend(["-G"])
if self.p4_port:
args.extend(["-p", self.p4_port])
if self.p4_client:
args.extend(["-c", self.p4_client])
if self.p4_user:
args.extend(["-u", self.p4_user])
args.extend(extra_args)
data, retcode = RunShellWithReturnCode(
args, print_output=False, universal_newlines=universal_newlines)
if marshal_output and data:
data = marshal.loads(data)
return data, retcode
def RunPerforceCommand(self, extra_args, marshal_output=False,
universal_newlines=True):
# This might be a good place to cache call results, since things like
# describe or fstat might get called repeatedly.
data, retcode = self.RunPerforceCommandWithReturnCode(
extra_args, marshal_output, universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
return data
def GetFileProperties(self, property_key_prefix = "", command = "describe"):
description = self.RunPerforceCommand(["describe", self.p4_changelist],
marshal_output=True)
changed_files = {}
file_index = 0
# Try depotFile0, depotFile1, ... until we don't find a match
while True:
file_key = "depotFile%d" % file_index
if file_key in description:
filename = description[file_key]
change_type = description[property_key_prefix + str(file_index)]
changed_files[filename] = change_type
file_index += 1
else:
break
return changed_files
def GetChangedFiles(self):
return self.GetFileProperties("action")
def GetUnknownFiles(self):
# Perforce doesn't detect new files, they have to be explicitly added
return []
def IsBaseBinary(self, filename):
base_filename = self.GetBaseFilename(filename)
return self.IsBinaryHelper(base_filename, "files")
def IsPendingBinary(self, filename):
return self.IsBinaryHelper(filename, "describe")
def IsBinaryHelper(self, filename, command):
file_types = self.GetFileProperties("type", command)
if not filename in file_types:
ErrorExit("Trying to check binary status of unknown file %s." % filename)
# This treats symlinks, macintosh resource files, temporary objects, and
# unicode as binary. See the Perforce docs for more details:
# http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
return not file_types[filename].endswith("text")
def GetFileContent(self, filename, revision, is_binary):
file_arg = filename
if revision:
file_arg += "#" + revision
# -q suppresses the initial line that displays the filename and revision
return self.RunPerforceCommand(["print", "-q", file_arg],
universal_newlines=not is_binary)
def GetBaseFilename(self, filename):
actionsWithDifferentBases = [
"move/add", # p4 move
"branch", # p4 integrate (to a new file), similar to hg "add"
"add", # p4 integrate (to a new file), after modifying the new file
]
# We only see a different base for "add" if this is a downgraded branch
# after a file was branched (integrated), then edited.
if self.GetAction(filename) in actionsWithDifferentBases:
# -Or shows information about pending integrations/moves
fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
marshal_output=True)
baseFileKey = "resolveFromFile0" # I think it's safe to use only file0
if baseFileKey in fstat_result:
return fstat_result[baseFileKey]
return filename
def GetBaseRevision(self, filename):
base_filename = self.GetBaseFilename(filename)
have_result = self.RunPerforceCommand(["have", base_filename],
marshal_output=True)
if "haveRev" in have_result:
return have_result["haveRev"]
def GetLocalFilename(self, filename):
where = self.RunPerforceCommand(["where", filename], marshal_output=True)
if "path" in where:
return where["path"]
def GenerateDiff(self, args):
class DiffData:
def __init__(self, perforceVCS, filename, action):
self.perforceVCS = perforceVCS
self.filename = filename
self.action = action
self.base_filename = perforceVCS.GetBaseFilename(filename)
self.file_body = None
self.base_rev = None
self.prefix = None
self.working_copy = True
self.change_summary = None
def GenerateDiffHeader(diffData):
header = []
header.append("Index: %s" % diffData.filename)
header.append("=" * 67)
if diffData.base_filename != diffData.filename:
if diffData.action.startswith("move"):
verb = "rename"
else:
verb = "copy"
header.append("%s from %s" % (verb, diffData.base_filename))
header.append("%s to %s" % (verb, diffData.filename))
suffix = "\t(revision %s)" % diffData.base_rev
header.append("--- " + diffData.base_filename + suffix)
if diffData.working_copy:
suffix = "\t(working copy)"
header.append("+++ " + diffData.filename + suffix)
if diffData.change_summary:
header.append(diffData.change_summary)
return header
def GenerateMergeDiff(diffData, args):
# -du generates a unified diff, which is nearly svn format
diffData.file_body = self.RunPerforceCommand(
["diff", "-du", diffData.filename] + args)
diffData.base_rev = self.GetBaseRevision(diffData.filename)
diffData.prefix = ""
# We have to replace p4's file status output (the lines starting
# with +++ or ---) to match svn's diff format
lines = diffData.file_body.splitlines()
first_good_line = 0
while (first_good_line < len(lines) and
not lines[first_good_line].startswith("@@")):
first_good_line += 1
diffData.file_body = "\n".join(lines[first_good_line:])
return diffData
def GenerateAddDiff(diffData):
fstat = self.RunPerforceCommand(["fstat", diffData.filename],
marshal_output=True)
if "headRev" in fstat:
diffData.base_rev = fstat["headRev"] # Re-adding a deleted file
else:
diffData.base_rev = "0" # Brand new file
diffData.working_copy = False
rel_path = self.GetLocalFilename(diffData.filename)
diffData.file_body = open(rel_path, 'r').read()
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -0,0 +1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " @@"
diffData.prefix = "+"
return diffData
def GenerateDeleteDiff(diffData):
diffData.base_rev = self.GetBaseRevision(diffData.filename)
is_base_binary = self.IsBaseBinary(diffData.filename)
# For deletes, base_filename == filename
diffData.file_body = self.GetFileContent(diffData.base_filename,
None,
is_base_binary)
# Replicate svn's list of changed lines
line_count = len(diffData.file_body.splitlines())
diffData.change_summary = "@@ -1"
if line_count > 1:
diffData.change_summary += ",%d" % line_count
diffData.change_summary += " +0,0 @@"
diffData.prefix = "-"
return diffData
changed_files = self.GetChangedFiles()
svndiff = []
filecount = 0
for (filename, action) in changed_files.items():
svn_status = self.PerforceActionToSvnStatus(action)
if svn_status == "SKIP":
continue
diffData = DiffData(self, filename, action)
# Is it possible to diff a branched file? Stackoverflow says no:
# http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
if svn_status == "M":
diffData = GenerateMergeDiff(diffData, args)
elif svn_status == "A":
diffData = GenerateAddDiff(diffData)
elif svn_status == "D":
diffData = GenerateDeleteDiff(diffData)
else:
ErrorExit("Unknown file action %s (svn action %s)." % \
(action, svn_status))
svndiff += GenerateDiffHeader(diffData)
for line in diffData.file_body.splitlines():
svndiff.append(diffData.prefix + line)
filecount += 1
if not filecount:
ErrorExit("No valid patches found in output from p4 diff")
return "\n".join(svndiff) + "\n"
def PerforceActionToSvnStatus(self, status):
# Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
# Is there something more official?
return {
"add" : "A",
"branch" : "A",
"delete" : "D",
"edit" : "M", # Also includes changing file types.
"integrate" : "M",
"move/add" : "M",
"move/delete": "SKIP",
"purge" : "D", # How does a file's status become "purge"?
}[status]
def GetAction(self, filename):
changed_files = self.GetChangedFiles()
if not filename in changed_files:
ErrorExit("Trying to get base version of unknown file %s." % filename)
return changed_files[filename]
def GetBaseFile(self, filename):
base_filename = self.GetBaseFilename(filename)
base_content = ""
new_content = None
status = self.PerforceActionToSvnStatus(self.GetAction(filename))
if status != "A":
revision = self.GetBaseRevision(base_filename)
if not revision:
ErrorExit("Couldn't find base revision for file %s" % filename)
is_base_binary = self.IsBaseBinary(base_filename)
base_content = self.GetFileContent(base_filename,
revision,
is_base_binary)
is_binary = self.IsPendingBinary(filename)
if status != "D" and status != "SKIP":
relpath = self.GetLocalFilename(filename)
if is_binary:
new_content = open(relpath, "rb").read()
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
      # When a file is modified, paths use '/' between directories; however,
      # when a property is modified, '\' is used on Windows. Make them the
      # same, otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
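# Illustrative sketch: SplitPatch() applied to a hypothetical two-file
# svn-style diff yields one (filename, text) pair per "Index:" section.
def _ExampleSplitPatch():
  diff = ("Index: a.txt\n"
          "--- a.txt\n"
          "+++ a.txt\n"
          "@@ -1 +1 @@\n"
          "-old\n"
          "+new\n"
          "Index: b.txt\n"
          "--- b.txt\n"
          "+++ b.txt\n"
          "@@ -0,0 +1 @@\n"
          "+added\n")
  return SplitPatch(diff)  # -> [('a.txt', '...'), ('b.txt', '...')]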
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
def UploadFile(filename, data):
form_fields = [("filename", filename)]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
try:
response_body = rpc_server.Send(url, body, content_type=ctype)
except urllib2.HTTPError, e:
response_body = ("Failed to upload patch for %s. Got %d status code." %
(filename, e.code))
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
return ("Uploaded patch for " + filename, [lines[1], filename])
threads = []
thread_pool = ThreadPool(options.num_upload_threads)
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
filename = patch[0]
data = patch[1]
t = thread_pool.apply_async(UploadFile, args=(filename, data))
threads.append(t)
for t in threads:
result = t.get(timeout=60)
print result[0]
rv.append(result[1])
return rv
def GuessVCSName(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS is detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
VCS_CVS, or VCS_UNKNOWN.
Since local perforce repositories can't be easily detected, this method
will only guess VCS_PERFORCE if any perforce options have been specified.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
for attribute, value in options.__dict__.iteritems():
if attribute.startswith("p4") and value != None:
return (VCS_PERFORCE, None)
def RunDetectCommand(vcs_type, command):
"""Helper to detect VCS by executing command.
Returns:
A pair (vcs, output) or None. Throws exception on error.
"""
try:
out, returncode = RunShellWithReturnCode(command)
if returncode == 0:
return (vcs_type, out.strip())
except OSError, (errcode, message):
if errcode != errno.ENOENT: # command not found code
raise
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
if res != None:
return res
# Subversion from 1.7 has a single centralized .svn folder
# ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )
# That's why we use 'svn info' instead of checking for .svn dir
res = RunDetectCommand(VCS_SUBVERSION, ["svn", "info"])
if res != None:
return res
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
"--is-inside-work-tree"])
if res != None:
return res
  # Detect CVS repos: `cvs status` exits with status 0 inside a checkout.
res = RunDetectCommand(VCS_CVS, ["cvs", "status"])
if res != None:
return res
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName(options)
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_PERFORCE:
return PerforceVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
elif vcs == VCS_CVS:
return CVSVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
    A dictionary whose key-value pairs correspond to the [auto-props]
    section's key-value pairs.
    In the following cases, returns an empty dictionary:
- config file doesn't exist, or
- 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
"""
if os.name == 'nt':
subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
else:
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
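# Illustrative sketch: given a Subversion config whose [auto-props] section
# contains, for example,
#   [auto-props]
#   *.cc = svn:eol-style=LF
#   *.sh = svn:eol-style=LF;svn:executable
# LoadSubversionAutoProperties() returns a mapping shaped like the hypothetical
# literal below (values come from ParseSubversionPropertyValues, defined next).
_EXAMPLE_AUTO_PROPS = {
  '*.cc': [('svn:eol-style', 'LF')],
  '*.sh': [('svn:eol-style', 'LF'), ('svn:executable', '*')],
}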
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
      # If the value is not given, use '*', per Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
    A string like 'Property changes on |filename| ...' if the given |filename|
    matches any entries in the [auto-props] section; None otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
options, args = parser.parse_args(argv[1:])
if options.help:
if options.verbose < 2:
# hide Perforce options
parser.epilog = (
"Use '--help -v' to show additional Perforce options. "
"For more help, see "
"http://code.google.com/p/rietveld/wiki/CodeReviewHelp"
)
parser.option_groups.remove(parser.get_option_group('--p4_port'))
parser.print_help()
sys.exit(0)
global verbosity
verbosity = options.verbose
if verbosity >= 3:
LOGGER.setLevel(logging.DEBUG)
elif verbosity >= 2:
LOGGER.setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
LOGGER.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
if options.print_diffs:
print "Rietveld diff start:*****"
print data
print "Rietveld diff end:*****"
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.use_oauth2:
options.save_cookies = False
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type,
options.use_oauth2,
options.oauth2_port,
options.open_oauth2_local_webbrowser)
form_fields = []
repo_guid = vcs.GetGUID()
if repo_guid:
form_fields.append(("repo_guid", repo_guid))
if base:
b = urlparse.urlparse(base)
username, netloc = urllib.splituser(b.netloc)
if username:
LOGGER.info("Removed username from base URL")
base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
b.query, b.fragment))
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
if options.project:
form_fields.append(("project", options.project))
if options.target_ref:
form_fields.append(("target_ref", options.target_ref))
# Process --message, --title and --file.
message = options.message or ""
title = options.title or ""
if options.file:
if options.message:
ErrorExit("Can't specify both message and message file options")
file = open(options.file, 'r')
message = file.read()
file.close()
if options.issue:
prompt = "Title describing this patch set: "
else:
prompt = "New issue subject: "
title = (
title or message.split('\n', 1)[0].strip() or raw_input(prompt).strip())
if not title and not options.issue:
ErrorExit("A non-empty title is required for a new issue")
  # For existing issues, it's fine to give a patchset an empty name. Rietveld
  # doesn't accept that, so use a single space instead.
title = title or " "
if len(title) > 100:
title = title[:99] + '…'
if title and not options.issue:
message = message or title
form_fields.append(("subject", title))
  # If it's a new issue, send the message as the description. Otherwise a new
  # message is created below on upload_complete.
if message and not options.issue:
form_fields.append(("description", message))
  # Send a hash of all the base files so the server can determine if a copy
  # already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
if options.send_patch:
options.send_mail = True
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
payload = {} # payload for final request
if options.send_mail:
payload["send_mail"] = "yes"
if options.send_patch:
payload["attach_patch"] = "yes"
if options.issue and message:
payload["message"] = message
payload = urllib.urlencode(payload)
rpc_server.Send("/" + issue + "/upload_complete/" + (patchset or ""),
payload=payload)
return issue, patchset
def main():
try:
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
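# Illustrative usage sketch, not part of the original script: a typical
# invocation against a Rietveld instance looks roughly like the line below.
# The server name and reviewer address are placeholders; only flags handled
# by the options code above are used.
#
#   python upload.py -s codereview.example.com --reviewers [email protected] \
#       --title "Short title" --message "Longer change description"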
|
G-P-S/depot_tools
|
third_party/upload.py
|
Python
|
bsd-3-clause
| 100,112
|
[
"VisIt"
] |
7695103c6fd037d5a56216ea17d4a88851f45c6a677dbec8ee191b482021742f
|
#!/usr/bin/env python
import sys
import inspect
import re
import optparse
import vtk
from pydoc import classname
'''
This is a translation from the TCL code of the same name.
The original TCL code uses catch {...}
catch returns 1 if an error occurred and 0 if no error.
In this code TryUpdate(), TryShutdown(), TrySetInputData and
TestOne() emulate this by returning False if an error occurred
and True if no error occurred.
'''
vtk.vtkObject.GlobalWarningDisplayOff()
def RedirectVTKMessages():
""" Can be used to redirect VTK related error messages to a
file."""
log = vtk.vtkFileOutputWindow()
log.SetFlush(1)
log.AppendOff()
log.SetFileName('TestEmptyInput-err.log')
log.SetInstance(log)
commonExceptions = set([
"vtkDistributedDataFilter", # core dump
"vtkDataEncoder", # Use after free error.
"vtkWebApplication", # Thread issues - calls vtkDataEncoder
# These give an HDF5 no file name error.
"vtkAMRFlashParticlesReader",
"vtkAMREnzoParticlesReader",
"vtkAMRFlashReader"
])
classLinuxExceptions = set([
"vtkAMREnzoReader" # core dump
])
# In the case of Windows, these classes cause a crash.
classWindowsExceptions = set([
"vtkWin32VideoSource", # Update() works the first time but a subsequent run calls up the webcam which crashes on close.
"vtkCMLMoleculeReader",
"vtkCPExodusIIInSituReader",
"vtkMINCImageWriter",
"vtkQtInitialization"
])
classExceptions = set()
emptyPD = vtk.vtkPolyData()
emptyID = vtk.vtkImageData()
emptySG = vtk.vtkStructuredGrid()
emptyUG = vtk.vtkUnstructuredGrid()
emptyRG = vtk.vtkRectilinearGrid()
# This will hold the classes to be tested.
vtkClasses = set()
classNames = None
classesTested = set()
# Keep a record of the classes tested.
nonexistentClasses = set()
abstractClasses = set()
noConcreteImplementation = set()
noShutdown = set()
noObserver = set()
# Is a vtkAlgorithm but EmptyInput failed.
isVtkAlgorithm = set()
emptyInputWorked = set()
emptyInputFailed = set()
#------------------------
# These variables are used for further record keeping
# should users wish to investigate or debug further.
noUpdate = set()
noSetInput = set()
# A dictionary consisting of a key and five booleans corresponding to
# whether SetInput() using emptyPD, emptyID, emptySG, emptyUG, emptyRG
# worked.
inputStatus = dict()
# A dictionary consisting of a key and five booleans corresponding to
# whether Update() using emptyPD, emptyID, emptySG, emptyUG, emptyRG
# worked if SetInput() worked.
updateStatus = dict()
#-----------------------
# Controls the verbosity of the output.
verbose = False
class ErrorObserver:
'''
See: http://public.kitware.com/pipermail/vtkusers/2012-June/074703.html
'''
def __init__(self):
self.__ErrorOccurred = False
self.__ErrorMessage = None
self.CallDataType = 'string0'
def __call__(self, obj, event, message):
self.__ErrorOccurred = True
self.__ErrorMessage = message
def ErrorOccurred(self):
occ = self.__ErrorOccurred
self.__ErrorOccurred = False
return occ
def ErrorMessage(self):
return self.__ErrorMessage
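# A minimal usage sketch added for clarity; it is not part of the original
# test. The helper name and the choice of vtkSphereSource are illustrative:
# attach the observer to a VTK object's 'ErrorEvent' and poll it afterwards.
def DemoErrorObserverUsage():
    e = ErrorObserver()
    src = vtk.vtkSphereSource()
    src.AddObserver('ErrorEvent', e)
    src.Update()
    return e.ErrorOccurred(), e.ErrorMessage()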
def GetVTKClasses():
'''
:return: a set of all the VTK classes.
'''
# This pattern will get the name and type of the member in the vtk classes.
pattern = r'\<vtkclass (.*)\.(.*)\>'
regEx = re.compile(pattern)
    vtkClasses = inspect.getmembers(
        vtk, lambda m: inspect.isclass(m) and not inspect.isabstract(m))
res = set()
for name, obj in vtkClasses:
result = re.match(regEx, repr(obj))
if result:
res.add(result.group(2))
return res
def GetVTKClassGroups(vtkClasses, subStr):
'''
:param: vtkClasses - the set of VTK classes
    :param: subStr - the substring for the VTK class to match, e.g. Reader
    :return: a set of all the VTK classes whose names contain the substring.
'''
res = set()
for obj in list(vtkClasses):
if obj.find(subStr) > -1:
res.add(obj)
return res
def FilterClasses(allClasses, filter):
'''
    :param: allClasses - the set of VTK classes
    :param: filter - a list of substrings of classes to be removed
:return: a set of all the VTK classes that do not have the substrings
in their names.
'''
res = allClasses
for f in filter:
c = GetVTKClassGroups(allClasses, f)
res = res - c
return res
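# A small composition sketch added for clarity (not part of the original
# test): select all wrapped classes, pick out the readers, then drop the
# writer and Qt classes. The substrings used here are only examples.
def DemoClassSelection():
    allClasses = GetVTKClasses()
    readers = GetVTKClassGroups(allClasses, 'Reader')
    remaining = FilterClasses(allClasses, ['Writer', 'Qt'])
    return readers, remaining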
def TryUpdate(b, e):
'''
The method Update() is tried on the instantiated class.
    Some classes do not have an Update() method; if this is the case,
    Python will generate an AttributeError, the name of the class is
    stored in the global variable noUpdate, and False is returned.
    If an error occurs on Update() then the error handler
    will be triggered and in this case False is returned.
:param: b - the class on which Update() will be tried.
:param: e - the error handler.
:return: True if the update was successful, False otherwise.
'''
try:
b.Update()
if e.ErrorOccurred():
return False
else:
return True
except AttributeError:
# No Update() method
noUpdate.add(b.GetClassName())
return False
except:
raise
def TryShutdown(b, e):
'''
The method Shutdown() is tried on the instantiated class.
    Some classes do not have a Shutdown() method; if this is the case,
    Python will generate an AttributeError, the name of the class is
    stored in the global variable noShutdown, and False is returned.
    If an error occurs on Shutdown() then the error handler
    will be triggered and in this case False is returned.
    :param: b - the class on which Shutdown() will be tried.
    :param: e - the error handler.
    :return: True if the shutdown was successful, False otherwise.
'''
try:
b.Shutdown()
if e.ErrorOccurred():
return False
else:
return True
except AttributeError:
# No Shutdown() method
noShutdown.add(b.GetClassName())
return False
except:
raise
def TrySetInputData(b, e, d):
'''
The method SetInputData() is tried on the instantiated class.
    Some classes do not have a SetInputData() method; if this is the case,
    Python will generate an AttributeError, the name of the class is
    stored in the global variable noSetInput, and False is returned.
    If an error occurs on SetInputData() then the error handler
    will be triggered and in this case False is returned.
    :param: b - the class on which SetInputData() will be tried.
    :param: e - the error handler.
    :param: d - input data.
    :return: True if SetInputData() was successful, False otherwise.
'''
try:
b.SetInputData(d)
if e.ErrorOccurred():
return False
else:
return True
except AttributeError:
# No SetInputData() method
noSetInput.add(b.GetClassName())
return False
except:
raise
def TestOne(cname):
'''
Test the named class.
    Some classes will raise a TypeError, NotImplementedError or
    AttributeError; in these cases the name of the class is stored in
    abstractClasses, noConcreteImplementation or nonexistentClasses and
    the corresponding status code below is returned.
    Return values:
    0: None of the SetInput() tests together with the corresponding
       Update() succeeded.
    1: At least one SetInput() test together with the corresponding
       Update() succeeded.
2: No observer could be added.
3: If it is an abstract class.
4: No concrete implementation.
5: Class does not exist.
:param: cname - the name of the class to be tested.
:return: One of the above return values.
'''
try:
b = getattr(vtk, cname)()
e = ErrorObserver()
isAlgorithm = False
# A record of whether b.SetInput() worked or not.
# The indexing corresponds to using
# emptyPD, emptyID, emptySG, emptyUG, emptyRG in SetInput()
iStatus = [False, False, False, False, False]
# A record of whether b.Update() worked or not.
# The indexing corresponds to:
# [emptyPD, emptyID, emptySG, emptyUG, emptyRG]
uStatus = [False, False, False, False, False]
try:
b.AddObserver('ErrorEvent', e)
except AttributeError:
noObserver.add(cname)
return 2
except:
raise
if b.IsA("vtkAlgorithm"):
u = TryUpdate(b, e)
if u:
isAlgorithm = True
if TrySetInputData(b, e, emptyPD):
iStatus[0] = True
u = TryUpdate(b, e)
if u:
uStatus[0] = True
if TrySetInputData(b, e, emptyID):
iStatus[1] = True
u = TryUpdate(b, e)
if u:
uStatus[1] = True
if TrySetInputData(b, e, emptySG):
iStatus[2] = True
u = TryUpdate(b, e)
if u:
uStatus[2] = True
if TrySetInputData(b, e, emptyUG):
iStatus[3] = True
u = TryUpdate(b, e)
if u:
uStatus[3] = True
if TrySetInputData(b, e, emptyRG):
iStatus[4] = True
u = TryUpdate(b, e)
if u:
uStatus[4] = True
# If thread creation moves away from the vtkGeoSource constructor, then
# this ShutDown call will not be necessary...
#
if b.IsA("vtkGeoSource"):
TryShutdown(b, e)
inputStatus[cname] = iStatus
updateStatus[cname] = uStatus
ok = False
# We require input and update to work for success.
mergeStatus = map(lambda pair: pair[0] & pair[1], zip(iStatus, uStatus))
for value in mergeStatus:
ok |= value
if not(ok) and isAlgorithm:
isVtkAlgorithm.add(cname)
if ok:
emptyInputWorked.add(cname)
else:
emptyInputFailed.add(cname)
b = None
if ok:
return 1
return 0
except TypeError:
# Trapping abstract classes.
abstractClasses.add(cname)
return 3
except NotImplementedError:
# No concrete implementation
noConcreteImplementation.add(cname)
return 4
except AttributeError:
# Class does not exist
nonexistentClasses.add(cname)
return 5
except:
raise
def TestEmptyInput(batch, batchNo=0, batchSize=0):
'''
Test each class in batch for empty input.
:param: batch - the set of classes to be tested.
:param: batchNo - if the set of classes is a subgroup then this
is the index of the subgroup.
:param: batchSize - if the set of classes is a subgroup then this
is the size of the subgroup.
'''
baseIdx = batchNo * batchSize
idx = baseIdx
for a in batch:
batchIdx = idx - baseIdx
# res = " Testing -- {:4d} - {:s}".format(idx,a)
# There is no format method in Python 2.5
res = " Testing -- %4d - %s" % (idx, a)
if (batchIdx < len(batch) - 1):
# nextRes = " Next -- {:4d} - {:s}".format(idx + 1,list(batch)[batchIdx +1])
nextRes = " Next -- %4d - %s" % (idx + 1, list(batch)[batchIdx + 1])
else:
nextRes = "No next"
# if verbose:
# print(res, nextRes)
classesTested.add(a)
ok = TestOne(a)
if ok == 0:
if verbose:
print(res + ' - Fail')
elif ok == 1:
if verbose:
print(res + ' - Ok')
elif ok == 2:
if verbose:
print(res + ' - no observer could be added.')
elif ok == 3:
if verbose:
print(res + ' - is Abstract')
elif ok == 4:
if verbose:
print(res + ' - No concrete implementation')
elif ok == 5:
if verbose:
print(res + ' - Does not exist')
else:
if verbose:
print(res + ' - Unknown status')
idx += 1
def BatchTest(vtkClasses, batchNo, batchSize):
'''
Batch the classes into groups of batchSize.
:param: batchNo - if the set of classes is a subgroup then this
is the index of the subgroup.
:param: batchSize - if the set of classes is a subgroup then this
is the size of the subgroup.
'''
idx = 0
    total = 0
batch = set()
for a in vtkClasses:
        currentBatchNo = idx // batchSize
if currentBatchNo == batchNo:
batch.add(a)
total += 1
if total == batchSize:
TestEmptyInput(batch, batchNo, batchSize)
print(total)
batch = set()
total = 0
idx += 1
if batch:
TestEmptyInput(batch, batchNo, batchSize)
print(total)
def PrintResultSummary():
print('-' * 40)
print('Empty Input worked: %i' % len(emptyInputWorked))
print('Empty Input failed: %i' % len(emptyInputFailed))
print('Abstract classes: %i' % len(abstractClasses))
print('Non-existent classes: %i' % len(nonexistentClasses))
print('No concrete implementation: %i' % len(noConcreteImplementation))
print('No observer could be added: %i' % len(noObserver))
print('-' * 40)
print('Total number of classes tested: ', len(classesTested)) # , classesTested
print('-' * 40)
print('Excluded from testing: ', len(classExceptions))
print('-' * 40)
def ProgramOptions():
desc = """
%prog Tests each VTK class for empty input using various data structures.
"""
parser = optparse.OptionParser(description=desc)
parser.set_defaults(verbose=False)
parser.add_option('-c', '--classnames',
help='The name of the class or a list of classes in quotes separated by commas.',
type='string',
dest='classnames',
default=None,
action='store')
parser.add_option('-q', '--quiet',
help='Do not print status messages to stdout (default)',
dest='verbose',
action="store_false")
parser.add_option('-v', '--verbose',
help='Print status messages to stdout',
dest='verbose',
action="store_true")
(opts, args) = parser.parse_args()
return (True, opts)
def CheckPythonVersion(ver):
'''
Check the Python version.
:param: ver - the minimum required version number as hexadecimal.
:return: True if if the Python version is greater than or equal to ver.
'''
if sys.hexversion < ver:
return False
return True
def main(argv=None):
if not CheckPythonVersion(0x02060000):
print('This program requires Python 2.6 or greater.')
return
global classExceptions
global vtkClasses
global classNames
global verbose
if argv is None:
argv = sys.argv
(res, opts) = ProgramOptions()
if opts.classnames:
cn = [x.strip() for x in opts.classnames.split(',')]
classNames = set(cn)
if opts.verbose:
verbose = opts.verbose
print('CTEST_FULL_OUTPUT (Avoid ctest truncation of output)')
# RedirectVTKMessages()
if classNames:
TestEmptyInput(classNames)
else:
classExceptions = commonExceptions.union(classLinuxExceptions)
classExceptions = classExceptions.union(classWindowsExceptions)
vtkClasses = GetVTKClasses()
# filter = ['Reader', 'Writer', 'Array_I', 'Qt']
# vtkClasses = FilterClasses(vtkClasses, filter)
vtkClasses = vtkClasses - classExceptions
TestEmptyInput(vtkClasses)
# In Windows
# 0-10, 10-17, 17-18, 18-23 in steps of 100 work but not if called
# in a loop.
# intervals = [[18,20]]# [[0,10]], [10,17], [17,18], [18,20]]
# for j in intervals:
# for i in range(j[0],j[1]):
# BatchTest(vtkClasses, i, 100)
PrintResultSummary()
if __name__ == '__main__':
sys.exit(main())
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Common/Core/Testing/Python/TestEmptyInput.py
|
Python
|
gpl-3.0
| 16,244
|
[
"VTK"
] |
16e2b0c9e588bc480d37b382bec768b3d90c4498f47f52c18acace726f9fd60a
|
#!/usr/bin/python
import argparse
import arguments
import logconfig
import session
from app.elk.tiny_builder import TinyElkBuilder
def create_stack(args):
boto3_session = session.new(args.profile, args.region, args.role)
builder = TinyElkBuilder(args, boto3_session, False)
return builder.build(args.dry_run)
default_desc = 'Tiny ELK Stack'
default_es_instance_type = 't2.micro'
default_kibana_instance_type = 't2.micro'
def get_args():
ap = argparse.ArgumentParser(description='Create a CloudFormation stack hosting a tiny ELK stack: 1 server for Logstash/Elasticsearch, 1 server for Kibana',
add_help=False)
req = ap.add_argument_group('Required')
req.add_argument('stack_name',
help='Name of the ELK stack to create')
req.add_argument('network_stack_name',
help='Name of the network stack')
req.add_argument('--server-key', required=True,
help='Name of the key pair used to access the ELK server instances.')
st = ap.add_argument_group('Stack definitions')
st.add_argument('--desc', default=default_desc,
help=arguments.generate_help('Stack description.', default_desc))
st.add_argument('--es-instance-type', default=default_es_instance_type,
help=arguments.generate_help('Instance type for the Elasticsearch server.', default_es_instance_type))
st.add_argument('--kibana-instance-type', default=default_kibana_instance_type,
help=arguments.generate_help('Instance type for the Kibana server.', default_kibana_instance_type))
arguments.add_deployment_group(ap)
arguments.add_security_control_group(ap)
return ap.parse_args()
if __name__ == '__main__':
logconfig.config()
args = get_args()
results = create_stack(args)
# TODO: move these to logging messages
if results.dry_run:
print results.template
else:
print 'ID: ', results.stack.stack_id
print 'STATUS: ', results.stack.stack_status
if results.stack.stack_status_reason is not None:
print 'REASON: ', results.stack.stack_status_reason
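# Illustrative invocation, not part of the original script; the stack, network
# and key-pair names are placeholders:
#
#   python create_tiny_elk_stack.py my-elk-stack my-network-stack \
#       --server-key my-keypair --es-instance-type t2.small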
|
mccormickmichael/laurel
|
create_tiny_elk_stack.py
|
Python
|
unlicense
| 2,193
|
[
"Elk"
] |
1ed420b96bc3ba40051566956029f3e5fd2a0a629d12d04ddce85458a9288e55
|
# pylint: disable=C0111
# pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=W0613
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=E0611
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
world.register_by_course_id(course_id, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
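# Illustrative feature snippet, not taken from the original repository; it
# shows the kind of Gherkin scenario these step definitions are meant to match:
#
#   Feature: Basic navigation
#     Scenario: A registered user can reach the dashboard
#       Given I am a logged in user
#       When I visit the homepage
#       And I click the link with the text "Dashboard"
#       Then I should be on the dashboard page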
|
jswope00/GAI
|
common/djangoapps/terrain/steps.py
|
Python
|
agpl-3.0
| 6,709
|
[
"VisIt"
] |
3769fdc8270ce9ace32c7fa57541c7c90cdc32df9e9cd8e3017e50d303920b17
|
# Interface for registration
# Written by: Amber Thatcher, Stephen Coakley, Nyia Lor, and Jaiden Trinh
# Written for: Instant messenger project
# Created on: 4/10/2016
from client import rpc
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import queue
# Set up a global incoming message queue.
message_queue = queue.Queue()
"""
Handles commands pushed from the server.
"""
class Handler(rpc.Handler):
"""
Puts an incoming message into the message queue.
"""
def receive_message(self, **kwargs):
message_queue.put(kwargs)
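# Note added for clarity: messages placed on message_queue here by the RPC
# handler are consumed by ChatWindow.check_message_queue(), which polls the
# queue from the Tk event loop every 100 ms (see further below).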
"""
The main connection and login window.
Window shown before launching the main chat window. Handles connecting to the
server, logging in, and registration.
"""
class LoginWindow:
default_address = "0.0.0.0"
default_port = 6543
def __init__(self, window):
self.window = window
self.frame = None
self.proxy = None
self.username = StringVar()
self.password = StringVar()
window.protocol("WM_DELETE_WINDOW", self.close)
window.resizable(width=FALSE, height=FALSE)
window.rowconfigure(0, weight=1)
window.columnconfigure(0, weight=1)
self.show_connect()
"""
Shows the server connection form.
"""
def show_connect(self):
if self.frame:
self.frame.destroy()
self.window.title("Connect to server")
self.frame = Frame(self.window)
self.frame.grid(sticky=N+S+E+W, padx=10, pady=10)
Label(self.frame, text="Connect to a server", style="Title.TLabel").grid(columnspan=2, padx=10, pady=10)
Label(self.frame, text="Address").grid(row=1, column=0, sticky=E)
self.address_entry = Entry(self.frame)
self.address_entry.insert(END, LoginWindow.default_address)
self.address_entry.grid(row=1, column=1, padx=10, pady=10)
Label(self.frame, text="Port").grid(row=2, column=0, sticky=E)
self.port_entry = Entry(self.frame)
self.port_entry.insert(END, str(LoginWindow.default_port))
self.port_entry.grid(row=2, column=1, padx=10, pady=10)
button_frame = Frame(self.frame)
button_frame.grid(row=3, column=0, columnspan=2)
Button(button_frame, text="Connect", command=self.on_connect).grid(row=0, column=0, padx=10, pady=10)
Button(button_frame, text="Close", command=self.close).grid(row=0, column=1, padx=10, pady=10)
# Keyboard navigation.
self.address_entry.focus_set()
self.address_entry.bind("<Return>", lambda e: self.port_entry.focus_set())
self.port_entry.bind("<Return>", lambda e: self.on_connect())
"""
Shows the login form.
"""
def show_login(self):
if self.frame:
self.frame.destroy()
self.window.title("Login")
self.frame = Frame(self.window)
self.frame.grid(sticky=N+S+E+W, padx=10, pady=10)
Label(self.frame, text="Log in", style="Title.TLabel").grid(columnspan=2, padx=10, pady=10)
Label(self.frame, text="Username").grid(row=1, column=0, sticky=E)
self.username_entry = Entry(self.frame, textvariable=self.username)
self.username_entry.grid(row=1, column=1, padx=10, pady=10)
Label(self.frame, text="Password").grid(row=2, column=0, sticky=E)
self.password_entry = Entry(self.frame, textvariable=self.password, show="•")
self.password_entry.grid(row=2, column=1, padx=10, pady=10)
button_frame = Frame(self.frame)
button_frame.grid(row=3, column=0, columnspan=2)
Button(button_frame, text="Login", command=self.on_login).grid(row=0, column=0, padx=10, pady=10)
Button(button_frame, text="Sign Up", command=self.show_register).grid(row=0, column=1, padx=10, pady=10)
# Keyboard navigation.
self.username_entry.focus_set()
self.username_entry.bind("<Return>", lambda e: self.password_entry.focus_set())
self.password_entry.bind("<Return>", lambda e: self.on_login())
"""
Shows the registration form.
"""
def show_register(self):
if self.frame:
self.frame.destroy()
self.window.title("Register")
self.frame = Frame(self.window)
self.frame.grid(sticky=N+S+E+W, padx=10, pady=10)
# Have user create username
name = Label(self.frame, text="Username")
name.grid(row=0, sticky=E)
self.username_entry = Entry(self.frame, textvariable=self.username)
self.username_entry.grid(row=0,column=1, padx=10, pady=10)
# Have user enter password
password = Label(self.frame, text="Password")
password.grid(row=1, sticky=E)
self.password_entry = Entry(self.frame, textvariable=self.password, show="•")
self.password_entry.grid(row=1, column=1, padx=10, pady=10)
# Have user retype Password
repassword = Label(self.frame, text="Retype password")
repassword.grid(row=2, sticky=E)
self.repassword_entry = Entry(self.frame, show="•")
self.repassword_entry.grid(row=2, column=1, padx=10, pady=10)
# Have user enter first name
firstname = Label(self.frame, text="First name")
firstname.grid(row=3, sticky=E)
self.first_name_entry = Entry(self.frame)
self.first_name_entry.grid(row=3, column=1, padx=10, pady=10)
# Have user enter last name
lastname = Label(self.frame, text="Last name")
lastname.grid(row=4, sticky=E)
self.last_name_entry = Entry(self.frame)
self.last_name_entry.grid(row=4, column=1, padx=10, pady=10)
# Have user enter email
email = Label(self.frame, text="Email address")
email.grid(row=5, sticky=E)
self.email_entry = Entry(self.frame)
self.email_entry.grid(row=5, column=1, padx=10, pady=10)
# Have user enter address
address = Label(self.frame, text="Street address")
address.grid(row=6, sticky=E)
self.address_entry = Entry(self.frame)
self.address_entry.grid(row=6, column=1, padx=10, pady=10)
# Submit register information button that will send information to server
button_frame = Frame(self.frame)
button_frame.grid(row=7, column=0, columnspan=2)
Button(button_frame, text="Sign Up", command=self.on_register).grid(row=0, column=0, padx=10, pady=10)
Button(button_frame, text="Cancel", command=self.show_login).grid(row=0, column=1, padx=10, pady=10)
def on_connect(self):
address = self.address_entry.get()
port = int(self.port_entry.get())
try:
self.proxy = rpc.connect(address, port, Handler)
self.show_login()
except Exception as e:
messagebox.showerror("", "Could not connect to server.\n\nError: " + str(e))
def on_login(self):
username = self.username_entry.get()
password = self.password_entry.get()
try:
token = self.proxy.login(username=username, password=password)
# Open the chat window
self.frame.destroy()
ChatWindow(self.window, self.proxy, token)
except rpc.RpcException as e:
messagebox.showerror("", "Log in failed.\n\nError: " + str(e))
def on_register(self):
if self.repassword_entry.get() != self.password_entry.get():
messagebox.showerror("", "Password must match in both entries")
return
try:
self.proxy.create_user(
username = self.username_entry.get(),
password = self.password_entry.get(),
first_name = self.first_name_entry.get(),
last_name = self.last_name_entry.get(),
email = self.email_entry.get(),
address = self.address_entry.get()
)
messagebox.showinfo("", "Account created successfully!")
# Go back to login
self.show_login()
except rpc.RpcException as e:
messagebox.showerror("", "Registration failed.\n\nError: " + str(e))
def center(self):
self.window.update_idletasks()
w = self.window.winfo_screenwidth()
h = self.window.winfo_screenheight()
size = tuple(int(_) for _ in self.window.geometry().split('+')[0].split('x'))
x = w/2 - size[0]/2
y = h/2 - size[1]/2
self.window.geometry("+%d+%d" % (x, y))
def close(self):
if self.proxy:
self.proxy.close()
self.window.destroy()
"""
Main application window.
"""
class ChatWindow:
def __init__(self, window, proxy, token):
self.window = window
self.proxy = proxy
self.token = token
self.dest_username = None
self.dest_group = None
window.protocol("WM_DELETE_WINDOW", self.close)
window.minsize(width=200, height=200)
window.geometry("800x600")
window.resizable(width=TRUE, height=TRUE)
window.title("Instant Messenger")
window.rowconfigure(0, weight=1)
window.columnconfigure(0, weight=0)
window.columnconfigure(1, weight=1)
window.columnconfigure(2, weight=0)
self.group_frame = Frame(window)
self.group_frame.grid(row=0, column=0, sticky=N+S+E+W, padx=10, pady=10)
self.message_frame = Frame(window)
self.message_frame.grid(row=0, column=1, sticky=N+S+E+W, pady=10)
self.friends_frame = Frame(window)
self.friends_frame.grid(row=0, column=2, sticky=N+S+E+W, padx=10, pady=10)
# Groups frame.
Label(self.group_frame, text="Groups").grid(pady=(0,10))
self.group_frame.rowconfigure(1, weight=1)
self.group_list = None
Button(self.group_frame, text="Add user", command=self.on_add_group_user).grid(row=2)
Button(self.group_frame, text="Remove user", command=self.on_remove_group_user).grid(row=3)
Button(self.group_frame, text="New group", command=self.on_create_group).grid(row=4)
# Friends frame.
Label(self.friends_frame, text="Friends").grid(pady=(0,10))
self.friends_frame.rowconfigure(1, weight=1)
self.friends_list = None
Button(self.friends_frame, text="Add friend", command=self.on_add_friend).grid(row=2)
# Set up the chat log frame.
self.message_frame.rowconfigure(1, weight=1)
self.message_frame.columnconfigure(0, weight=1)
self.message_title = Label(self.message_frame)
self.message_title.grid(row=0, column=0, columnspan=2, pady=(0,10), sticky=N+S+E+W)
self.message_list = Listbox(self.message_frame)
self.message_list.grid(row=1, column=0, sticky=N+S+E+W)
self.message_scrollbar = Scrollbar(self.message_frame)
self.message_scrollbar.grid(row=1, column=1, sticky=N+S+E+W)
self.message_scrollbar.config(command=self.message_list.yview)
self.message_list.config(yscrollcommand=self.message_scrollbar.set)
# Set up the message input.
self.chat_entry = Entry(self.message_frame)
self.chat_entry.bind("<Return>", self.on_send_message)
self.chat_entry.grid(row=2, column=0, columnspan=2, sticky=N+S+E+W, pady=(5, 0), ipady=5)
self.chat_entry.focus_set()
# Show remote data.
self.refresh_groups_list()
self.refresh_friends_list()
# Schedule the incoming message callback.
self.window.after(100, self.check_message_queue)
"""
Refreshes the list of groups from the server.
"""
def refresh_groups_list(self):
groups = self.proxy.get_groups(token=self.token)
if self.group_list:
self.group_list.destroy()
self.group_list = Frame(self.group_frame)
for i, id in enumerate(groups):
group = self.proxy.get_group(token=self.token, id=id)
label = Button(self.group_list, text=group["name"], command=lambda g=id: self.choose_group(g))
label.grid(row=i, sticky=E+W)
self.group_list.grid(row=1, sticky=N+E+W)
"""
Refreshes the list of friends from the server.
"""
def refresh_friends_list(self):
friends = self.proxy.get_friends(token=self.token)
if self.friends_list:
self.friends_list.destroy()
self.friends_list = Frame(self.friends_frame)
for i, username in enumerate(friends):
label = Button(self.friends_list, text=username, command=lambda u=username: self.choose_user(u))
label.grid(row=i, sticky=E+W)
self.friends_list.grid(row=1, sticky=N+E+W)
"""
Displays the existing messages for the current room.
"""
def refresh_message_list(self):
# Remove messages already in the pane.
self.message_list.delete(0, END)
# If we are talking to a user,
if self.dest_username:
messages = self.proxy.get_messages_with_user(token=self.token, username=self.dest_username)
# If we are in a group
elif self.dest_group:
messages = self.proxy.get_messages_in_group(token=self.token, group=self.dest_group)
else:
return
for message in messages:
self.display_message(message)
"""
Sets the message destination to a user.
"""
def choose_user(self, username):
self.dest_group = None
self.dest_username = username
self.message_title.config(text="User: " + username)
self.refresh_message_list()
"""
Sets the message destination to a group.
"""
def choose_group(self, group_id):
self.dest_username = None
self.dest_group = group_id
group = self.proxy.get_group(token=self.token, id=group_id)
self.message_title.config(text="Group: " + group["name"])
self.refresh_message_list()
"""
Displays a message in the chat history.
"""
def display_message(self, message):
self.message_list.insert(END, message["sender"] + ": " + message["text"])
"""
Shows a dialog for adding a user to a group.
"""
def on_add_group_user(self):
if self.dest_group:
username = PromptWindow.prompt(self.window, "Type in a username")
self.proxy.add_group_user(token=self.token, group=self.dest_group, username=username)
self.refresh_groups_list()
self.choose_group(self.dest_group)
"""
Shows a dialog for removing a user from a group.
"""
def on_remove_group_user(self):
if self.dest_group:
username = PromptWindow.prompt(self.window, "Type in a username")
self.proxy.remove_group_user(token=self.token, group=self.dest_group, username=username)
self.refresh_groups_list()
self.choose_group(self.dest_group)
"""
Shows a dialog for creating a group.
"""
def on_create_group(self):
group_id = self.proxy.create_group(token=self.token)
self.refresh_groups_list()
self.choose_group(group_id)
"""
Shows a dialog for adding a friend.
"""
def on_add_friend(self):
username = PromptWindow.prompt(self.window, "Type in a username")
self.proxy.add_friend(token=self.token, username=username)
self.refresh_friends_list()
"""
Handles the event for sending a message.
"""
def on_send_message(self, event):
text = self.chat_entry.get()
# Slash commands are evaluated as Python code...
if text[0] == "/":
exec(text[1:])
# If we are talking to a user,
elif self.dest_username:
self.proxy.send_message(
token=self.token,
receiver={
"type": "user",
"username": self.dest_username,
},
text=text
)
# If we are in a group
elif self.dest_group:
self.proxy.send_message(
token=self.token,
receiver={
"type": "group",
"id": self.dest_group,
},
text=text
)
# Clear the message entry.
self.chat_entry.delete(0, END)
"""
Callback that runs periodically to display incoming messages in real-time.
"""
def check_message_queue(self):
while True:
try:
message = message_queue.get(False)
self.display_message(message)
except queue.Empty:
break
# Schedule again.
self.window.after(100, self.check_message_queue)
def close(self):
try:
self.proxy.logout(token=self.token)
self.proxy.close()
finally:
self.window.destroy()
"""
Convenience class for creating "prompt" dialog boxes.
"""
class PromptWindow:
def prompt(root, title):
window = PromptWindow(root, title)
root.wait_window(window.window)
return window.result
def __init__(self, root, title):
self.window = Toplevel(root)
self.window.resizable(width=FALSE, height=FALSE)
self.window.title(title)
self.label = Label(self.window, text=title)
self.label.grid(padx=10, pady=10)
self.entry = Entry(self.window)
self.entry.bind("<Return>", lambda e: self.submit())
self.entry.grid(row=1, padx=10)
self.entry.focus_set()
self.button = Button(self.window, text="OK", command=self.submit)
self.button.grid(row=2, padx=10, pady=10)
def submit(self):
self.result = self.entry.get()
self.window.destroy()
def run():
# Set up root window.
root = Tk()
# Make tkinter less ugly.
style = Style()
if "vista" in style.theme_names():
style.theme_use("vista")
elif "aqua" in style.theme_names():
style.theme_use("aqua")
else:
style.theme_use("clam")
style.configure("Title.TLabel", font=("Helvetica", 16))
# Show window.
window = LoginWindow(root)
window.center()
root.mainloop()
|
cs460-group1/chat-client
|
client/ui.py
|
Python
|
mit
| 18,643
|
[
"Amber"
] |
f09db4d644d6cc4c93753b110b08078cb176d711d19987a3e5793c5f73ee4ed2
|
"""
This sample demonstrates a new channel of customer service via Amazon Alexa Skills.
This demo shows a conversation between a customer and an intelligent agent (IA) Alexa via Amazon Echo
to collect information and set up a customer service callback. This new channel is an augmentation to the
traditional channels, such as calling an IVR or submitting a customer callback form on a company website.
Using Twilio the customer can be connected to the following types of call centers:
a) Twilio powered modern contact center
b) A traditional call center by dialing a phone number via PSTN
c) A traditional call center by connecting to a PBX/SBC via SIP
For additional samples about using Alexa Skills, visit the Getting Started guide at
http://amzn.to/1LGWsLG
Ameer Badri
Twilio
"""
from __future__ import print_function
# import pytz
import sys
import requests
import config
import json
import urllib
import re
from twilio import twiml, TwilioRestException
from twilio.rest import TwilioRestClient
session_attributes = {}
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
#if (event['session']['application']['applicationId'] != "<YOUR_APPLICATON_ID>"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# lets call the intent processing directly
intent = {"name": "CustomerServiceIntent"}
return customer_callback_service(intent, session)
# Dispatch to your skill's launch
# return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "CustomerServiceIntent":
return customer_callback_service(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
card_title = "Twilio Customer Service"
speech_output = "Welcome to Twillio Customer Service. " \
" This channel is powered by Amazon Alexa Skills. You can start by saying, Alexa open Twilio customer callback"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Welcome to Twillio Customer Service. " \
" This channel is powered by Amazon Alexa Skills. You can start by saying, Alexa open Twilio customer callback"
should_end_session = True
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for using Twillio Customer Service. " \
"Have a nice day! "
reprompt_text = "Thank you for using Twillio Customer Service. " \
"Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
# List of account names and additional details
# Replace name and phone numbers
accounts = {
"ameer": [{"phonenumber": "+447477121234",
"supportlevel": "Premium"}],
"simon": [{"phonenumber": "+14155081234",
"supportlevel": "Bronze"}]
}
def lookup_customer_account(name):
account = {}
account_name = name.lower()
print('Looking up Account Name: ' + account_name)
try:
account = accounts[account_name]
account = account[0]
except:
account = {}
print ('Account info: ' + json.dumps(account))
return (account)
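# Illustrative sketch added for clarity; it is not part of the original sample
# and is never called. The lookup is case-insensitive and falls back to an
# empty dict for unknown account names.
def demo_lookup_customer_account():
    known = lookup_customer_account('Ameer')      # -> phonenumber/supportlevel dict
    unknown = lookup_customer_account('nobody')   # -> {}
    return known, unknown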
def customer_callback_service(intent, session):
""" Sets the name to be called in the session and prepares the speech to reply to the
user.
"""
card_title = intent['name']
should_end_session = False
speech_output = ""
reprompt_text = ""
print ('Intent: ' + str(intent))
print ('Session: ' + str(session))
if 'False' in str(session['new']):
if 'get_customername' in session_attributes['callback_flow']:
if 'customername' in intent['slots']:
if 'value' in intent['slots']['customername']:
# This is a conversation about customer name
customername = intent['slots']['customername']['value']
session_attributes['customername'] = customername
print ('Looking up account details')
account_details = {}
account_details = lookup_customer_account(customername)
print ('Account details: ' + account_details['phonenumber'])
session_attributes['customer_phonenumber'] = account_details['phonenumber']
session_attributes['customer_supportlevel'] = account_details['supportlevel']
# We didn't find a callback phone number in our DB. Lets collect it first.
if not session_attributes['customer_phonenumber']:
session_attributes['callback_flow'] = 'get_phone_number';
print ('Session Attributes: ' + str(session_attributes))
                        speech_output = session_attributes['customername'] + ", I did not find a phone number for the callback in the system. Let me get your phone number. " \
                            "Please say your phone number starting with country code."
                        reprompt_text = session_attributes['customername'] + ", I did not find a phone number for the callback in the system. Let me get your phone number. " \
                            "Please say your phone number starting with country code."
else:
session_attributes['callback_flow'] = 'get_department'
print ('Session Attributes: ' + str(session_attributes))
speech_output = session_attributes['customername'] + ", I was able to lookup your phone number and account details in the system. " \
"Which department would you like to contact? Sales, Marketing or Support"
reprompt_text = session_attributes['customername'] + ", I was able to lookup your phone number and account details in the system. " \
"Which department would you like to contact? Sales, Marketing or Support"
else:
speech_output = "What is the account name?"
reprompt_text = "Please say the account name."
elif 'get_phone_number' in session_attributes['callback_flow']:
if 'phonenumber' in intent['slots']:
if 'value' in intent['slots']['phonenumber']:
# This is a conversation about collecting phone number
phonenumber = intent['slots']['phonenumber']['value']
phonenumber = '+' + re.sub(r'\D', "", phonenumber)
session_attributes['customer_phonenumber'] = phonenumber
session_attributes['callback_flow'] = 'get_department'
print ('Session Attributes: ' + str(session_attributes))
speech_output = "Which department would you like to contact? Sales, Marketing or Support"
reprompt_text = "Which department would you like to contact? Sales, Marketing or Support"
else:
speech_output = "Which department you would like to contact? Sales, Marketing or Support"
reprompt_text = "You can say. Sales, Marketing or Support"
elif 'get_department' in session_attributes['callback_flow']:
if 'department' in intent['slots']:
if 'value' in intent['slots']['department']:
# This is a conversation about department
departmentname = intent['slots']['department']['value']
session_attributes['departmentname'] = departmentname.lower()
session_attributes['callback_flow'] = 'get_reason'
print ('Session Attributes: ' + str(session_attributes))
speech_output = "Briefly describe the reason for your inquiry."
reprompt_text = "Briefly describe the reason for your inquiry."
else:
speech_output = "Which department you would like to contact? Sales, Marketing or Support."
reprompt_text = "You can say, Sales, Marketing or Support."
elif 'get_reason' in session_attributes['callback_flow']:
if 'reason' in intent['slots']:
if 'value' in intent['slots']['reason']:
# This is a conversation about callback reason
reason = intent['slots']['reason']['value']
session_attributes['reason'] = reason
session_attributes['callback_flow'] = 'get_callback_confirmation'
print ('Session Attributes: ' + str(session_attributes))
collected_info = 'Here is the summary of your request. Account Name ' + session_attributes['customername']
# collected_info = collected_info + '. Contact phone number <say-as interpret-as="telephone">' + session_attributes['customer_phonenumber'] + '</say-as>'
collected_info = collected_info + '. Support Level, ' + session_attributes['customer_supportlevel']
collected_info = collected_info + '. Department to contact, ' + session_attributes['departmentname']
collected_info = collected_info + '. Reason for callback, ' + session_attributes['reason']
speech_output = collected_info + ". Would you like to proceed? Say yes or no."
reprompt_text = " . Would you like to proceed? Say yes or no."
should_end_session = False
elif 'get_callback_confirmation' in session_attributes['callback_flow']:
if 'confirm' in intent['slots']:
if 'value' in intent['slots']['confirm']:
if intent['slots']['confirm']['value'].lower() in ['yes', 'yep']:
augmentation_type = session_attributes['augmentation_type']
if augmentation_type == 'twilio_contact_center':
#
# Augmentation Type: 1
# Call the Task creation API in Twilio Contact Center Demo app
#
task = {
'channel': 'phone',
'phone': session_attributes['customer_phonenumber'],
'name': session_attributes['customername'],
'supportlevel': session_attributes['customer_supportlevel'],
'text': 'Support Level - ' + session_attributes['customer_supportlevel'] + ': ' + session_attributes['reason'],
'team': session_attributes['departmentname'],
'type': 'Callback request'
}
# Create a taskrouter task in contact center for a callback (separate contact center demo install required)
# Change the domain name below to your install of the contact center demo
# For details see: https://github.com/nash-md/twilio-contact-center
                            r = requests.post('https://<your_twilio_contact_center_demo>.herokuapp.com/api/tasks/callback', data = task)
elif augmentation_type == 'pstn':
#
# Augmentation Type: 2
# Call existing contact center over PSTN
#
# Replace the value with your call center agent phone number. E.164 format
agent_phone_number = "+14151234567"
say_language = config.LANGUAGE
resp_customer = twiml.Response()
resp_customer.pause()
resp_customer.say("Hello, " + session_attributes['customername'], voice="alice", language=say_language)
resp_customer.say("This is a callback from Twilio support. Connecting you to an agent.", voice="alice", language=say_language)
# Create the URL that Twilio will request after the customer is called
# Use the echo Twimlet to return TwiML that Twilio needs for the call
customer_url = "http://twimlets.com/echo?Twiml=" + urllib.quote_plus(str(resp_customer))
print("customer url: " + customer_url)
# Create Twiml that will be played to the call center agent when the call is answered
resp_agent = twiml.Response()
resp_agent.pause()
resp_agent.say("Customer " + session_attributes['customername'] + ", Is requesting a callback.", voice="alice", language=say_language)
resp_agent.say(",Reason for their inquiry, " + session_attributes['reason'], voice="alice", language=say_language)
resp_agent.say(",Dialing the customer ", voice="alice", language=say_language)
with resp_agent.dial(callerId = agent_phone_number) as r:
r.number(session_attributes['customer_phonenumber'], url=customer_url)
# Create the URL that Twilio will request after the agent is called
# Use the echo Twimlet to return TwiML that Twilio needs for the call
agent_url = "http://twimlets.com/echo?Twiml=" + urllib.quote_plus(str(resp_agent))
print("agent url: " + agent_url)
# Make an outbound call to the agent first and then dial the customer
# Replace the "to" with your contact center agent's phone number
client = TwilioRestClient(config.ACCOUNT_SID, config.AUTH_TOKEN)
call = client.calls.create(
to=agent_phone_number,
from_=session_attributes['customer_phonenumber'],
url=agent_url)
elif augmentation_type == 'sip':
#
# Augmentation Type: 3
# Call existing contact center over SIP
#
resp = twiml.Response()
resp.say("Hello, " + session_attributes['customername'])
resp.say("This is a callback from Twilio support. Connecting you to an agent.")
# Insert your company contact center SIP address
with resp.dial() as r:
r.sip("[email protected]")
resp_string = urllib.urlencode(str(resp))
session_attributes['callback_flow'] = 'callback_request_complete'
speech_output = "OK. I will pass this information to the next available agent. You will receive a call shortly."
reprompt_text = ""
# this will end the customer support request session
should_end_session = True
else:
speech_output = "Your request has been cancelled. Have a great day!"
reprompt_text = ""
# this will end the customer support request session
should_end_session = True
else:
speech_output = "Please re-try requesting customer service"
reprompt_text = ""
should_end_session = True
# Invoked at the very first instance of this customer callback flow
else:
# initialize configuration
# Callback_flow acts as a state machine that allows the creation of a
# structured conversation between the user and alexa.
session_attributes['callback_flow'] = 'get_customername'
# Augmentation_type is set to invoke one of the three options for
# connecting into the backend call center
# pstn: Dialing an agents phone number directly
# sip: connecting to a PBX or SBC
# twilio_contact_center: creating a callback task into the twilio contact center demo app
session_attributes['augmentation_type'] = 'pstn'
greeting_message = 'Hello!'
        speech_output = greeting_message + " Welcome to Twillio Customer Service. " \
            "First, I will collect some information, and then set up a " \
            "callback from a customer service agent. Let's get started! What is the account name? "
        reprompt_text = "Welcome to Twillio Customer Service. Let's get started. What is the account name? "
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
#
# --------------- Helpers that build all of the responses ----------------------
#
def build_speechlet_response_plaintext(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'SSML',
'ssml': '<speak>' + output + '</speak>'
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'SSML',
'ssml': '<speak>' + reprompt_text + '</speak>'
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
|
ameerbadri/amazon-alexa-twilio-customer-service
|
Lambda Function/lambda_function.py
|
Python
|
mit
| 21,342
|
[
"VisIt"
] |
4be8edb5049351ad3da1b733ff6ea2dba5a5aeaa0711d9ea9f4984cebebc1df4
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess, unittest, os
from TestHarnessTestCase import TestHarnessTestCase
def checkQstat():
try:
if subprocess.call(['qstat']) == 0:
return True
except:
pass
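# Note added for clarity: checkQstat() returns True only when 'qstat' exits
# with status 0; on any failure it falls through and returns None, which is
# why the skipIf decorator below compares the result against True explicitly.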
@unittest.skipIf(checkQstat() != True, "PBS not available")
class TestHarnessTester(TestHarnessTestCase):
"""
    Test general PBS functionality. There are some caveats, however:
    we cannot test the output of a specific test, only the initial launch return code.
    This is because qsub launches the job as a background process, and we have no idea
    when that job finishes, or whether it even began (perhaps the job is queued).
"""
def setUp(self):
"""
setUp occurs before every test. Clean up previous results file
"""
pbs_results_file = os.path.join(os.getenv('MOOSE_DIR'), 'test', '_testPBS')
# File will not exist on the first run
try:
os.remove(pbs_results_file)
except:
pass
def testPBSQueue(self):
"""
Test argument '--pbs-queue does-not-exist' fails, as this queue should not exist
"""
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.runTests('--pbs-queue', 'does-not-exist', '--pbs', '_testPBS', '-i', 'always_ok')
e = cm.exception
self.assertRegex(e.output.decode('utf-8'), r'ERROR: qsub: Unknown queue')
def testPBSLaunch(self):
"""
Test general launch command
"""
output = self.runTests('--pbs', '_testPBS', '-i', 'always_ok').decode('utf-8')
self.assertNotIn('LAUNCHED', output)
|
harterj/moose
|
python/TestHarness/tests/test_PBS.py
|
Python
|
lgpl-2.1
| 1,925
|
[
"MOOSE"
] |
3c7f4a2fc713e1b552f707e9449c09f5dbc9747666276c150b89298df34b7bb9
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from django_xmlrpc.decorators import permission_required, xmlrpc_func
from karaage.common.decorators import xmlrpc_machine_required
from karaage.machines.models import Account
from karaage.projects.models import Project
@xmlrpc_machine_required()
@xmlrpc_func(returns='list', args=['string'])
def get_project_members(machine, project_id):
"""
Returns list of usernames given a project id
"""
try:
project = Project.objects.get(pid=project_id)
except Project.DoesNotExist:
return 'Project not found'
return [x.username for x in project.group.members.all()]
@xmlrpc_machine_required()
@xmlrpc_func(returns='list')
def get_projects(machine):
"""
Returns list of project ids
"""
query = Project.active.all()
return [x.pid for x in query]
@xmlrpc_func(returns='string', args=['string', 'string', 'string'])
def get_project(username, project, machine_name=None):
"""
Used in the submit filter to make sure user is in project
"""
try:
account = Account.objects.get(
username=username,
date_deleted__isnull=True)
except Account.DoesNotExist:
return "Account '%s' not found" % username
if project is None:
project = account.default_project
else:
try:
project = Project.objects.get(pid=project)
except Project.DoesNotExist:
project = account.default_project
if project is None:
return "None"
if account.person not in project.group.members.all():
project = account.default_project
if project is None:
return "None"
if account.person not in project.group.members.all():
return "None"
return project.pid
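# Illustrative client-side call (added commentary; the server URL and argument values
# are assumptions, not taken from this module): these functions are exposed over
# XML-RPC, so a cluster-side submit filter could query them roughly like this:
#
#   import xmlrpclib  # xmlrpc.client on Python 3
#   server = xmlrpclib.ServerProxy('https://karaage.example.org/xmlrpc/')
#   pid = server.get_project('jbloggs', 'pExample01', 'cluster1')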
@permission_required()
@xmlrpc_func(returns='list')
def get_users_projects(user):
"""
List projects a user is part of
"""
person = user
projects = person.projects.filter(is_active=True)
return 0, [x.pid for x in projects]
|
brianmay/karaage
|
karaage/projects/xmlrpc.py
|
Python
|
gpl-3.0
| 2,722
|
[
"Brian"
] |
1becc665c34cc02d742ad3c71eff9cfa5d9105ff2fc7931db569ba29d404ac89
|
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
"""Atomic Simulation Environment."""
from ase.atom import Atom
from ase.atoms import Atoms
|
grhawk/ASE
|
tools/ase/__init__.py
|
Python
|
gpl-2.0
| 170
|
[
"ASE"
] |
c2c4a9d39d556a71f686cc4f73fd9918251cbe65b9dfa4842265d7e83882d7cf
|
# This is not a random import, it's quite pertinent to the code
from random import random
# First, we create this maze world
length = 10
density = 0.2 # probability of generating a barrier in a cell
barrier = '+'
space = '_'
def gen_row():
def gen_cell():
if random() > 1 - density:
return barrier
else:
return space
return [gen_cell() for _ in xrange(length)]
maze = [gen_row() for _ in xrange(length)]
# Let's initialize the start and goal locations
start = (0,0)
maze[start[0]][start[1]] = 'S'
def gen_goal():
return (int(random() * length), int(random() * length))
goal = gen_goal()
while goal == start: # we shouldn't make this too easy...
goal = gen_goal()
maze[goal[0]][goal[1]] = 'G'
# We ought to see the world
def print_maze():
for row in maze:
for elem in row:
print elem,
print ''
print_maze()
# Time to Breadth First Search!
frontier = [start] # stores what's around our current place
visited = {} # where have we been?
backpointers = {} # how do we find the way home?
found = False # we may finish, never to have found the goal
while len(frontier) > 0:
visit = frontier[0]
del frontier[0]
if maze[visit[0]][visit[1]] == 'G':
found = True
break
visited[visit] = 1
# generate neighbor cells and add them to the frontier
neighbors = [(0,1),(0,-1),(1,0),(-1,0)]
neighbors = [tuple(map(sum, zip(x,visit))) for x in neighbors]
neighbors = filter(lambda x: min(x) >= 0 and max(x) < length, neighbors)
for n in neighbors:
if maze[n[0]][n[1]] == barrier:
continue
if n not in visited and n not in frontier:
backpointers[n] = visit
frontier.append(n)
if not found:
print '\nNo path found :('
else:
back = backpointers[goal]
path = []
while back != start:
path.append(back)
back = backpointers[back]
for step in path:
maze[step[0]][step[1]] = '@'
print '\nPath of length %d found!' % len(path)
print_maze()
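# Note (added commentary, not in the original script): because BFS expands the frontier
# in FIFO order over an unweighted grid, the first time the goal is dequeued the
# backpointer chain reconstructed above is a shortest path; each reachable cell is
# enqueued at most once, so the search runs in O(length * length) time for this grid.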
|
FluxLemur/fluxlemur.github.io
|
assets/python/maze.py
|
Python
|
mit
| 2,080
|
[
"VisIt"
] |
1657b2d17f1f37b81adb1fbc8dd10f7f6cff48b408d18077e5fa9d013821c5f9
|
###############################################################################
# Copyright 2017-2021 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "fstd2nc" package.
#
# "fstd2nc" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "fstd2nc" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "fstd2nc". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
from setuptools import setup, find_packages
from fstd2nc import __version__
with open("README.md","r") as f:
long_description = f.read()
setup (
name="fstd2nc",
version=__version__,
description = 'Converts RPN standard files (from Environment Canada) to netCDF files.',
long_description = long_description,
# https://stackoverflow.com/a/26737258/9947646
long_description_content_type='text/markdown',
url = 'https://github.com/neishm/fstd2nc',
author="Mike Neish",
license = 'LGPL-3',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
],
packages = find_packages(),
setup_requires = ['pip >= 8.1'],
install_requires = ['numpy >= 1.13.0, != 1.15.3','netcdf4','fstd2nc-deps >= 0.20200304.0','progress'],
extras_require = {
'manyfiles': ['pandas'],
'array': ['xarray>=0.10.3','dask','toolz'],
'iris': ['iris>=2.0','xarray>=0.10.3','dask','toolz'],
'pygeode': ['pygeode>=1.2.2','xarray>=0.10.3','dask','toolz'],
},
package_data = {
'fstd2nc': ['locale/*/LC_MESSAGES/fstd2nc.mo'],
},
entry_points={
'console_scripts': [
'fstd2nc = fstd2nc.__main__:_fstd2nc_cmdline_trapped',
'fstdump = fstd2nc.__main__:_fstdump',
],
},
)
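# Installation note (added commentary, not part of the original setup script): the
# optional dependency groups declared in extras_require are selected with pip's
# extras syntax, e.g.
#   pip install fstd2nc[array]
#   pip install fstd2nc[iris,pygeode]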
|
neishm/fstd2nc
|
setup.py
|
Python
|
lgpl-3.0
| 2,432
|
[
"NetCDF"
] |
dc667e33c7e67d6e61e0b5e66ba7e8bb9a3b259ce912dc1794c82b0c24eb466c
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
    gdaladdo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessingParameterRasterLayer,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingOutputRasterLayer)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class gdaladdo(GdalAlgorithm):
INPUT = 'INPUT'
LEVELS = 'LEVELS'
CLEAN = 'CLEAN'
RESAMPLING = 'RESAMPLING'
FORMAT = 'FORMAT'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.methods = ((self.tr('Nearest neighbour'), 'nearest'),
(self.tr('Average'), 'average'),
(self.tr('Gaussian'), 'gauss'),
(self.tr('Cubic convolution.'), 'cubic'),
(self.tr('B-Spline convolution'), 'cubicspline'),
(self.tr('Lanczos windowed sinc'), 'lanczos'),
(self.tr('Average MP'), 'average_mp'),
(self.tr('Average in mag/phase space'), 'average_magphase'),
(self.tr('Mode'), 'mode'))
self.formats = (self.tr('Internal (if possible)'),
self.tr('External (GTiff .ovr)'),
self.tr('External (ERDAS Imagine .aux)'))
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterString(self.LEVELS,
self.tr('Overview levels'),
defaultValue='2 4 8 16'))
self.addParameter(QgsProcessingParameterBoolean(self.CLEAN,
self.tr('Remove all existing overviews'),
defaultValue=False))
params = []
params.append(QgsProcessingParameterEnum(self.RESAMPLING,
self.tr('Resampling method'),
options=[i[0] for i in self.methods],
allowMultiple=False,
defaultValue=0))
params.append(QgsProcessingParameterEnum(self.FORMAT,
self.tr('Overviews format'),
options=self.formats,
allowMultiple=False,
defaultValue=0))
self.addOutput(QgsProcessingOutputRasterLayer(self.OUTPUT, self.tr('Pyramidized')))
def name(self):
return 'overviews'
def displayName(self):
return self.tr('Build overviews (pyramids)')
def group(self):
return self.tr('Raster miscellaneous')
def groupId(self):
return 'rastermiscellaneous'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-overview.png'))
def getConsoleCommands(self, parameters, context, feedback, executing=True):
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
fileName = inLayer.source()
arguments = []
arguments.append(fileName)
arguments.append('-r')
arguments.append(self.methods[self.parameterAsEnum(parameters, self.RESAMPLING, context)][1])
ovrFormat = self.parameterAsEnum(parameters, self.FORMAT, context)
if ovrFormat == 1:
arguments.append('-ro')
elif ovrFormat == 2:
arguments.extend('--config USE_RRD YES'.split(' '))
if self.parameterAsBool(parameters, self.CLEAN, context):
arguments.append('-clean')
arguments.extend(self.parameterAsString(parameters, self.LEVELS, context).split(' '))
self.setOutputValue(self.OUTPUT, fileName)
return ['gdaladdo', GdalUtils.escapeAndJoin(arguments)]
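# Example of the command line this typically assembles (values are illustrative):
#   gdaladdo input.tif -r average 2 4 8 16
# Choosing the external GTiff .ovr format adds '-ro', the ERDAS .aux format adds
# '--config USE_RRD YES', and enabling CLEAN appends '-clean' before the levels.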
|
CS-SI/QGIS
|
python/plugins/processing/algs/gdal/gdaladdo.py
|
Python
|
gpl-2.0
| 5,418
|
[
"Gaussian"
] |
5585e2693e2a751aae19afb968f588abb820ee7ce5781486852bfe8dbd4f650d
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.domains.std import Cmdoption
from sphinx.util.compat import Directive
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
try:
from sphinx.writers.html import SmartyPantsHTMLTranslator as HTMLTranslator
except ImportError: # Sphinx 1.6+
from sphinx.writers.html import HTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive('django-admin-option', Cmdoption)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
app.set_translator('djangohtml', DjangoHTMLTranslator)
app.set_translator('json', DjangoHTMLTranslator)
return {'parallel_read_safe': True}
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s snippet' % lang)
self.body.append(starttag)
self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
code = node.rawsource.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append(
'\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (
# Some filenames have '_', which is special in latex.
fname.replace('_', r'\_'),
)
)
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
# Prevent rawsource from appearing in output a second time.
raise nodes.SkipNode
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
pass
class SnippetWithFilename(Directive):
"""
The 'snippet' directive that allows to add the filename (optional)
of a code snippet in the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
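# Usage sketch (added for illustration; the file name and code body are assumptions):
# in a reST source file the directive registered above is written as
#
#   .. snippet:: python
#       :filename: myapp/views.py
#
#       def index(request):
#           ...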
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
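# Usage sketch (added commentary; the version number and sentence are illustrative):
# this directive backs markup such as
#
#   .. versionadded:: 4.2
#
#       The hypothetical ``example_option`` argument was added.
#
# When the version matches the django_next_version config value the callout is
# rendered as "Development version" instead of the literal number.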
class DjangoHTMLTranslator(HTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
def depart_desc_parameterlist(self, node):
self.body.append(')')
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
super().visit_section(node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env.ref_context['std:program'] = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return command
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super().finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatetag" and k == "ref/templates/builtins"
],
"tfilters": [
n for ((t, n), (k, a)) in xrefs.items()
if t == "templatefilter" and k == "ref/templates/builtins"
],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
|
mitya57/django
|
docs/_ext/djangodocs.py
|
Python
|
bsd-3-clause
| 10,891
|
[
"VisIt"
] |
b94c795b2b2674ef233f20d3f14b437c5d945df25f828ad8af2c9858eaa3fee9
|
import os
import numpy as np
from numpy import ma
from what_file import what_format
from metadata import run_shp_info
try:
import gdal
except ImportError:
from osgeo import gdal
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
from gdalconst import GA_ReadOnly
def nc_to_gtif(latitudes, longitudes, values, geotiff_name):
print "Creating GeoTIFF"
file_format = "GTiff"
driver = gdal.GetDriverByName(file_format)
print "Getting values for making GeoTIFF"
try:
values = ma.array(values).data
except:
pass
print "Converting longitudes"
for i in range(len(longitudes)):
if longitudes[i] > 180:
longitudes[i] = longitudes[i] - 360
print "Calculating pixel size"
raster_x_size = longitudes.shape[0]
raster_y_size = latitudes.shape[0]
#print "Rotating GeoTIFF"
#values = numpy.rot90(values, 1)
print "Setting projection for GeoTIFF"
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
number_of_band = 1
raster_type = gdal.GDT_Float32
print "Creating raster dataset"
raster_dataset = driver.Create(geotiff_name,
raster_x_size,
raster_y_size,
number_of_band,
raster_type)
raster_dataset.GetRasterBand(1).WriteArray(values)
latitude_pixel_size = (abs(min(latitudes)) + abs(max(latitudes))) / len(latitudes)
longitude_pixel_size = (abs(min(longitudes)) + abs(max(longitudes))) / len(longitudes)
raster_dataset.SetGeoTransform((min(longitudes), latitude_pixel_size, 0, min(latitudes), 0, longitude_pixel_size))
raster_dataset.SetProjection(srs.ExportToWkt())
print "GeoTIFF Created"
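# Reference note (added commentary): GDAL's SetGeoTransform expects a 6-tuple of
# (top-left x, west-east pixel size, row rotation, top-left y, column rotation,
# north-south pixel size); the rotation terms are 0 for a north-up image.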
def nc_to_geojson(latitudes, longitudes, values, geotiff_name):
print "Creating GeoJSON from netCDF"
#multipoint = ogr.Geometry(ogr.wkbMultiPoint)
print "multipoint geometry made"
with open('{0}.json'.format(geotiff_name.split(".")[0]), 'w') as json:
json.write('''{"type": "FeatureCollection","features": [''')
string = ""
for ilat, lat in enumerate(latitudes):
for ilon, lon in enumerate(longitudes):
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(float(lon), float(lat))
geojson_point = point.ExportToJson()
#multipoint.AddGeometry(point)
string += '{"type": "Feature","geometry":'
string += geojson_point
string += ',"properties": {"prop0": "'
string += "{0}".format(values[ilat][ilon])
string += '"}},'
string = string[:-1] + ']}'
json.write(string)
json.close()
print "netCDF to GeoJSON, Done"
def get_geojson(vector_path):
vector_format = what_format(vector_path)
if vector_format == "ESRI Shapefile":
vector_name = run_shp_info(vector_path)['layer_name']
GeoJSON_file_name = vector_name + "_wgs84.json"
if not os.path.isfile(GeoJSON_file_name):
string = "ogr2ogr -f GeoJSON -t_srs EPSG:4326 -lco \"WRITE_BBOX=YES\" {0} {1} ".format(GeoJSON_file_name, vector_path)
os.system(string)
return GeoJSON_file_name
def shp_to_kml(shp_path, kml_name):
try:
shp_datasource = ogr.Open(shp_path)
except:
raise IOError("Shapefile cannot be opened")
driver = ogr.GetDriverByName('KML')
layer_name = 'kml_layer'
kml_datasource = driver.CreateDataSource(kml_name)
layer = shp_datasource.GetLayerByIndex(0)
srs = layer.GetSpatialRef()
geom_type = layer.GetGeomType()
kml_layer = kml_datasource.CreateLayer(layer_name, srs, geom_type)
layer_number = shp_datasource.GetLayerCount()
for each in range(layer_number):
layer = shp_datasource.GetLayerByIndex(each)
features_number = layer.GetFeatureCount()
for i in range(features_number):
shp_feature = layer.GetFeature(i)
feature_geometry = shp_feature.GetGeometryRef()
kml_feature = ogr.Feature(kml_layer.GetLayerDefn())
kml_feature.SetGeometry(feature_geometry)
kml_layer.CreateFeature(kml_feature)
def shp_to_tif(selected_shp, tif_name, shp_to_tif_layer, shp_to_tif_epsg, shp_to_tif_width, shp_to_tif_height, shp_to_tif_ot, shp_to_tif_burn1, shp_to_tif_burn2, shp_to_tif_burn3):
string = " gdal_rasterize -a_srs EPSG:{0} -ts {1} {2} -ot {3} -burn {4} -burn {5} -burn {6} -l {7} {8}.shp {9}".format(shp_to_tif_epsg,
shp_to_tif_width,
shp_to_tif_height,
shp_to_tif_ot,
shp_to_tif_burn1,
shp_to_tif_burn2,
shp_to_tif_burn3,
shp_to_tif_layer,
selected_shp,
tif_name)
os.system(string)
def shp_to_json(selected_shp, shp_to_json, shp_to_json_epsg):
string = "ogr2ogr -f GeoJSON {0} {1}.shp ".format(shp_to_json, selected_shp)
if shp_to_json_epsg:
string += "-t_srs EPSG:{0}".format(shp_to_json_epsg)
os.system(string)
def convert_geotiff_to_kml(selected_geotiff, geotiff_to_kml_name):
string = "gdal_translate -of KMLSUPEROVERLAY {0} {1}".format(selected_geotiff, geotiff_to_kml_name)
os.system(string)
def geotiff_to_point_shp(selected_geotiff, tif_to_point_shp_name, tif_to_point_shp_layer_name, tif_to_point_shp_epsg):
gtiff_dataset = gdal.Open(selected_geotiff, GA_ReadOnly)
gtiff_band = gtiff_dataset.GetRasterBand(1) #TODO: get multi band in future
transform = gtiff_dataset.GetGeoTransform()
x_origin = transform[0]
y_origin = transform[3]
pixel_x_size = transform[1]
pixel_y_size = transform[5]
tif_x_size = gtiff_dataset.RasterXSize
tif_y_size = gtiff_dataset.RasterYSize
x_end = x_origin + (tif_x_size * pixel_x_size)
y_end = y_origin + (tif_y_size * pixel_y_size)
longitudes = np.arange(x_origin, x_end, pixel_x_size)
latitudes = np.arange(y_origin, y_end, pixel_y_size)
data = gtiff_band.ReadAsArray(0, 0, tif_x_size, tif_y_size)
driver = ogr.GetDriverByName('ESRI Shapefile')
data_source = driver.CreateDataSource(tif_to_point_shp_name)
srs = osr.SpatialReference()
srs.ImportFromEPSG(int(tif_to_point_shp_epsg))
layer = data_source.CreateLayer(tif_to_point_shp_layer_name, srs, ogr.wkbPoint)
long_field = ogr.FieldDefn("Longitude", ogr.OFTString)
long_field.SetWidth(24)
layer.CreateField(long_field)
lat_field = ogr.FieldDefn("Latitude", ogr.OFTString)
lat_field.SetWidth(24)
layer.CreateField(lat_field)
value_field = ogr.FieldDefn("Value", ogr.OFTString)
value_field.SetWidth(24)
layer.CreateField(value_field)
for xi, x in enumerate(longitudes):
for yi, y in enumerate(latitudes):
value = data[yi-1][xi-1]
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(x, y)
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetGeometry(point)
feature.SetField("Longitude", '{0}'.format(x))
feature.SetField("Latitude", '{0}'.format(y))
feature.SetField("Value", '{0}'.format(value))
layer.CreateFeature(feature)
feature.Destroy()
def geotiff_to_point_json(selected_geotiff, tif_to_point_json_name, tif_to_point_json_epsg):
gtiff_dataset = gdal.Open(selected_geotiff, GA_ReadOnly)
gtiff_band = gtiff_dataset.GetRasterBand(1) #TODO: get multi band in future
transform = gtiff_dataset.GetGeoTransform()
x_origin = transform[0]
y_origin = transform[3]
pixel_x_size = transform[1]
pixel_y_size = transform[5]
tif_x_size = gtiff_dataset.RasterXSize
tif_y_size = gtiff_dataset.RasterYSize
x_end = x_origin + (tif_x_size * pixel_x_size)
y_end = y_origin + (tif_y_size * pixel_y_size)
longitudes = np.arange(x_origin, x_end, pixel_x_size)
latitudes = np.arange(y_origin, y_end, pixel_y_size)
data = gtiff_band.ReadAsArray(0, 0, tif_x_size, tif_y_size)
multipoint = ogr.Geometry(ogr.wkbMultiPoint)
with open(tif_to_point_json_name, 'w') as f:
f.write('''{
"type": "FeatureCollection",
"features": [ {
"type": "Feature","geometry": ''')
for xi, x in enumerate(longitudes):
for yi, y in enumerate(latitudes):
value = data[yi-1][xi-1]
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(x, y)
multipoint.AddGeometry(point)
f.write(multipoint.ExportToJson())
f.write('''}]}''')
def convert_coord_to_point_shp(selected_coord_text, coord_to_point_shp_separator, coord_to_point_shp_lat_col, coord_to_point_shp_lon_col, coord_to_point_shp_value_col, coord_to_point_shp_name, coord_to_point_shp_layer_name, coord_to_point_shp_epsg):
try:
with open(selected_coord_text, 'r') as f:
latitudes = []
longitudes = []
values = []
for line in f:
latitudes.append(float(line.split("{0}".format(coord_to_point_shp_separator))[int(coord_to_point_shp_lat_col)]))
longitudes.append(float(line.split("{0}".format(coord_to_point_shp_separator))[int(coord_to_point_shp_lon_col)]))
values.append(line.split("{0}".format(coord_to_point_shp_separator))[int(coord_to_point_shp_value_col)])
driver = ogr.GetDriverByName('ESRI Shapefile')
data_source = driver.CreateDataSource(coord_to_point_shp_name)
srs = osr.SpatialReference()
srs.ImportFromEPSG(int(coord_to_point_shp_epsg))
layer = data_source.CreateLayer(coord_to_point_shp_layer_name, srs, ogr.wkbPoint)
field_name = ogr.FieldDefn("Name", ogr.OFTString)
field_name.SetWidth(24)
layer.CreateField(field_name)
for i in range(len(latitudes)):
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(longitudes[i], latitudes[i])
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetGeometry(point)
feature.SetField("Name", 'point_{0}'.format(str(i)))
layer.CreateFeature(feature)
feature.Destroy()
error = "Done."
except:
error = "Cannot access data. Something went wrong"
return error
|
MBoustani/GISCube
|
giscube_app/scripts/conversion.py
|
Python
|
apache-2.0
| 11,449
|
[
"NetCDF"
] |
d35214bba4cfee5fae9a2e7a5a41a8fd323d809be02cbe5557fa0cbd5bbfd2d1
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of device_utils.py (mostly DeviceUtils).
"""
# pylint: disable=protected-access
# pylint: disable=unused-argument
import collections
import contextlib
import io
import json
import logging
import os
import posixpath
import stat
import sys
import unittest
import six
from devil import devil_env
from devil.android import device_errors
from devil.android import device_signal
from devil.android import device_utils
from devil.android.ndk import abis
from devil.android.sdk import adb_wrapper
from devil.android.sdk import intent
from devil.android.sdk import keyevent
from devil.android.sdk import version_codes
from devil.utils import cmd_helper
from devil.utils import mock_calls
with devil_env.SysPath(os.path.join(devil_env.PY_UTILS_PATH)):
from py_utils import tempfile_ext
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
TEST_APK_PATH = '/fake/test/app.apk'
TEST_PACKAGE = 'test.package'
def Process(name, pid, ppid='1'):
return device_utils.ProcessInfo(name=name, pid=pid, ppid=ppid)
def Processes(*args):
return [Process(*arg) for arg in args]
class AnyStringWith(object):
def __init__(self, value):
self._value = value
def __eq__(self, other):
return self._value in other
def __repr__(self):
return '<AnyStringWith: %s>' % self._value
class _FakeContextManager(object):
def __init__(self, obj):
self._obj = obj
def __enter__(self):
return self._obj
def __exit__(self, type_, value, traceback):
pass
class _MockApkHelper(object):
def __init__(self, path, package_name, perms=None, splits=None):
self.path = path
self.is_bundle = path.endswith('_bundle')
self.package_name = package_name
self.perms = perms
self.splits = splits if splits else []
self.abis = [abis.ARM]
self.version_code = None
def GetPackageName(self):
return self.package_name
def GetPermissions(self):
return self.perms
def GetVersionCode(self):
return self.version_code
def GetAbis(self):
return self.abis
def GetApkPaths(self,
device,
modules=None,
allow_cached_props=False,
additional_locales=None):
return _FakeContextManager([self.path] + self.splits)
#override
@staticmethod
def SupportsSplits():
return True
class _MockMultipleDevicesError(Exception):
pass
class DeviceUtilsInitTest(unittest.TestCase):
def testInitWithStr(self):
serial_as_str = str('0123456789abcdef')
d = device_utils.DeviceUtils('0123456789abcdef')
self.assertEqual(serial_as_str, d.adb.GetDeviceSerial())
def testInitWithUnicode(self):
if six.PY2:
serial_as_unicode = unicode('fedcba9876543210')
d = device_utils.DeviceUtils(serial_as_unicode)
self.assertEqual(serial_as_unicode, d.adb.GetDeviceSerial())
def testInitWithAdbWrapper(self):
serial = '123456789abcdef0'
a = adb_wrapper.AdbWrapper(serial)
d = device_utils.DeviceUtils(a)
self.assertEqual(serial, d.adb.GetDeviceSerial())
def testInitWithMissing_fails(self):
with self.assertRaises(ValueError):
device_utils.DeviceUtils(None)
with self.assertRaises(ValueError):
device_utils.DeviceUtils('')
class DeviceUtilsGetAVDsTest(mock_calls.TestCase):
def testGetAVDs(self):
mocked_attrs = {'android_sdk': '/my/sdk/path'}
with mock.patch('devil.devil_env._Environment.LocalPath',
mock.Mock(side_effect=lambda a: mocked_attrs[a])):
with self.assertCall(
mock.call.devil.utils.cmd_helper.GetCmdOutput(
[mock.ANY, 'list', 'avd']), 'Available Android Virtual Devices:\n'
' Name: my_android5.0\n'
' Path: /some/path/to/.android/avd/my_android5.0.avd\n'
' Target: Android 5.0 (API level 21)\n'
' Tag/ABI: default/x86\n'
' Skin: WVGA800\n'):
self.assertEqual(['my_android5.0'], device_utils.GetAVDs())
class DeviceUtilsRestartServerTest(mock_calls.TestCase):
@mock.patch('time.sleep', mock.Mock())
def testRestartServer_succeeds(self):
with self.assertCalls(
mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.KillServer(),
(mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
['pgrep', 'adb']), (1, '')),
mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.StartServer(),
(mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
['pgrep', 'adb']),
(1, '')), (mock.call.devil.utils.cmd_helper.GetCmdStatusAndOutput(
['pgrep', 'adb']), (0, '123\n'))):
adb_wrapper.RestartServer()
class MockTempFile(object):
def __init__(self, name='/tmp/some/file'):
self.file = mock.MagicMock(spec=io.BufferedIOBase)
self.file.name = name
self.file.name_quoted = cmd_helper.SingleQuote(name)
def __enter__(self):
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def name(self):
return self.file.name
class MockLogger(mock.Mock):
def __init__(self, *args, **kwargs):
super(MockLogger, self).__init__(*args, **kwargs)
self.warnings = []
def warning(self, message, *args):
self.warnings.append(message % args)
def PatchLogger():
return mock.patch(
'devil.android.device_utils.logger', new_callable=MockLogger)
class _PatchedFunction(object):
def __init__(self, patched=None, mocked=None):
self.patched = patched
self.mocked = mocked
def _AdbWrapperMock(test_serial, is_ready=True):
adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
adb.__str__ = mock.Mock(return_value=test_serial)
adb.GetDeviceSerial.return_value = test_serial
adb.is_ready = is_ready
return adb
class DeviceUtilsTest(mock_calls.TestCase):
def setUp(self):
self.adb = _AdbWrapperMock('0123456789abcdef')
self.device = device_utils.DeviceUtils(
self.adb, default_timeout=10, default_retries=0)
self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])
def safeAssertItemsEqual(self, expected, actual):
if six.PY2:
self.assertItemsEqual(expected, actual)
else:
self.assertCountEqual(expected, actual) # pylint: disable=no-member
def AdbCommandError(self, args=None, output=None, status=None, msg=None):
if args is None:
args = ['[unspecified]']
return mock.Mock(
side_effect=device_errors.AdbCommandFailedError(args, output, status,
msg, str(self.device)))
def CommandError(self, msg=None):
if msg is None:
msg = 'Command failed'
return mock.Mock(
side_effect=device_errors.CommandFailedError(msg, str(self.device)))
def ShellError(self, output=None, status=1):
def action(cmd, *args, **kwargs):
raise device_errors.AdbShellCommandFailedError(cmd, output, status,
str(self.device))
if output is None:
output = 'Permission denied\n'
return action
def TimeoutError(self, msg=None):
if msg is None:
msg = 'Operation timed out'
return mock.Mock(
side_effect=device_errors.CommandTimeoutError(msg, str(self.device)))
def EnsureCacheInitialized(self, props=None, sdcard='/sdcard'):
props = props or []
ret = [sdcard, 'TOKEN'] + props
return (self.call.device.RunShellCommand(
AnyStringWith('getprop'),
shell=True,
check_return=True,
large_output=True), ret)
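# Note on the test style below (added commentary): assertCall/assertCalls from
# devil.utils.mock_calls take (expected_call, return_value) pairs, so each test
# spells out the exact adb/device interactions it expects together with the canned
# output those calls should return.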
class DeviceUtilsEqTest(DeviceUtilsTest):
def testEq_equal_deviceUtils(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
self.assertTrue(self.device == other)
self.assertTrue(other == self.device)
def testEq_equal_adbWrapper(self):
other = adb_wrapper.AdbWrapper('0123456789abcdef')
self.assertTrue(self.device == other)
self.assertTrue(other == self.device)
def testEq_equal_string(self):
other = '0123456789abcdef'
self.assertTrue(self.device == other)
self.assertTrue(other == self.device)
def testEq_devicesNotEqual(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdee'))
self.assertFalse(self.device == other)
self.assertFalse(other == self.device)
def testEq_identity(self):
self.assertTrue(self.device == self.device)
def testEq_serialInList(self):
devices = [self.device]
self.assertTrue('0123456789abcdef' in devices)
class DeviceUtilsLtTest(DeviceUtilsTest):
def testLt_lessThan(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff'))
self.assertTrue(self.device < other)
self.assertTrue(other > self.device)
def testLt_greaterThan_lhs(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000'))
self.assertFalse(self.device < other)
self.assertFalse(other > self.device)
def testLt_equal(self):
other = device_utils.DeviceUtils(_AdbWrapperMock('0123456789abcdef'))
self.assertFalse(self.device < other)
self.assertFalse(other > self.device)
def testLt_sorted(self):
devices = [
device_utils.DeviceUtils(_AdbWrapperMock('ffffffffffffffff')),
device_utils.DeviceUtils(_AdbWrapperMock('0000000000000000')),
]
sorted_devices = sorted(devices)
self.assertEqual('0000000000000000',
sorted_devices[0].adb.GetDeviceSerial())
self.assertEqual('ffffffffffffffff',
sorted_devices[1].adb.GetDeviceSerial())
class DeviceUtilsStrTest(DeviceUtilsTest):
def testStr_returnsSerial(self):
with self.assertCalls((self.call.adb.GetDeviceSerial(),
'0123456789abcdef')):
self.assertEqual('0123456789abcdef', str(self.device))
class DeviceUtilsIsOnlineTest(DeviceUtilsTest):
def testIsOnline_true(self):
with self.assertCall(self.call.adb.GetState(), 'device'):
self.assertTrue(self.device.IsOnline())
def testIsOnline_false(self):
with self.assertCall(self.call.adb.GetState(), 'offline'):
self.assertFalse(self.device.IsOnline())
def testIsOnline_error(self):
with self.assertCall(self.call.adb.GetState(), self.CommandError()):
self.assertFalse(self.device.IsOnline())
class DeviceUtilsHasRootTest(DeviceUtilsTest):
def testHasRoot_true(self):
with self.patch_call(self.call.device.build_type,
return_value='userdebug'), (self.assertCall(
self.call.adb.Shell('id'), 'uid=0(root)\n')):
self.assertTrue(self.device.HasRoot())
def testHasRootEngBuild_true(self):
with self.patch_call(self.call.device.build_type, return_value='eng'):
self.assertTrue(self.device.HasRoot())
def testHasRoot_false(self):
with self.patch_call(self.call.device.build_type,
return_value='userdebug'), (self.assertCall(
self.call.adb.Shell('id'), 'uid=2000(shell)\n')):
self.assertFalse(self.device.HasRoot())
class DeviceUtilsEnableRootTest(DeviceUtilsTest):
def testEnableRoot_succeeds(self):
with self.assertCalls(self.call.adb.Root(), self.call.adb.WaitForDevice(),
(self.call.device.HasRoot(), True)):
self.device.EnableRoot()
def testEnableRoot_userBuild(self):
with self.assertCalls((self.call.adb.Root(), self.AdbCommandError()),
(self.call.device.IsUserBuild(), True)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.EnableRoot()
def testEnableRoot_rootFails(self):
with self.assertCalls((self.call.adb.Root(), self.AdbCommandError()),
(self.call.device.IsUserBuild(), False)):
with self.assertRaises(device_errors.AdbCommandFailedError):
self.device.EnableRoot()
def testEnableRoot_timeoutInWaitForDevice(self):
with self.assertCalls(
(self.call.adb.Root(),
self.AdbCommandError(
output='timeout expired while waiting for device')),
(self.call.device.IsUserBuild(), False), self.call.adb.WaitForDevice(),
(self.call.device.HasRoot(), True)):
self.device.EnableRoot()
class DeviceUtilsIsUserBuildTest(DeviceUtilsTest):
def testIsUserBuild_yes(self):
with self.assertCall(
self.call.device.GetProp('ro.build.type', cache=True), 'user'):
self.assertTrue(self.device.IsUserBuild())
def testIsUserBuild_no(self):
with self.assertCall(
self.call.device.GetProp('ro.build.type', cache=True), 'userdebug'):
self.assertFalse(self.device.IsUserBuild())
class DeviceUtilsGetExternalStoragePathTest(DeviceUtilsTest):
def testGetExternalStoragePath_succeeds(self):
with self.assertCalls(
self.EnsureCacheInitialized(sdcard='/fake/storage/path')):
self.assertEqual('/fake/storage/path',
self.device.GetExternalStoragePath())
def testGetExternalStoragePath_fails(self):
with self.assertCalls(self.EnsureCacheInitialized(sdcard='')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetExternalStoragePath()
class DeviceUtilsGetAppWritablePathTest(DeviceUtilsTest):
def testGetAppWritablePath_succeeds_sdk_pre_q(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '28'),
self.EnsureCacheInitialized(sdcard='/fake/storage/path')):
self.assertEqual('/fake/storage/path', self.device.GetAppWritablePath())
def testGetAppWritablePath_succeeds_sdk_q(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '29'),
self.EnsureCacheInitialized(sdcard='/fake/storage/path')):
self.assertEqual('/fake/storage/path/Download',
self.device.GetAppWritablePath())
def testGetAppWritablePath_fails(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '29'),
self.EnsureCacheInitialized(sdcard='')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetAppWritablePath()
class DeviceUtilsIsApplicationInstalledTest(DeviceUtilsTest):
def testIsApplicationInstalled_installed(self):
with self.assertCalls((self.call.device.RunShellCommand(
['pm', 'list', 'packages', 'some.installed.app'], check_return=True),
['package:some.installed.app'])):
self.assertTrue(self.device.IsApplicationInstalled('some.installed.app'))
def testIsApplicationInstalled_notInstalled(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['pm', 'list', 'packages', 'not.installed.app'], check_return=True),
''),
(self.call.device.RunShellCommand(
['dumpsys', 'package'], check_return=True, large_output=True), [])):
self.assertFalse(self.device.IsApplicationInstalled('not.installed.app'))
def testIsApplicationInstalled_substringMatch(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['pm', 'list', 'packages', 'substring.of.package'],
check_return=True),
[
'package:first.substring.of.package',
'package:second.substring.of.package',
]),
(self.call.device.RunShellCommand(
['dumpsys', 'package'], check_return=True, large_output=True), [])):
self.assertFalse(
self.device.IsApplicationInstalled('substring.of.package'))
def testIsApplicationInstalled_dumpsysFallback(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['pm', 'list', 'packages', 'some.installed.app'],
check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'package'], check_return=True, large_output=True),
['Package [some.installed.app] (a12345):'])):
self.assertTrue(self.device.IsApplicationInstalled('some.installed.app'))
def testIsApplicationInstalled_dumpsysFallbackVersioned(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'package'], check_return=True, large_output=True),
['Package [some.installed.app_1234] (a12345):'])):
self.assertTrue(
self.device.IsApplicationInstalled('some.installed.app', 1234))
def testIsApplicationInstalled_dumpsysFallbackVersionNotNeeded(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'package'], check_return=True, large_output=True),
['Package [some.installed.app] (a12345):'])):
self.assertTrue(
self.device.IsApplicationInstalled('some.installed.app', 1234))
class DeviceUtilsGetApplicationPathsInternalTest(DeviceUtilsTest):
def testGetApplicationPathsInternal_exists(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.device.RunShellCommand(['pm', 'path', 'android'],
check_return=True),
['package:/path/to/android.apk'])):
self.assertEqual(['/path/to/android.apk'],
self.device._GetApplicationPathsInternal('android'))
def testGetApplicationPathsInternal_notExists(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.device.RunShellCommand(['pm', 'path', 'not.installed.app'],
check_return=True), '')):
self.assertEqual(
[], self.device._GetApplicationPathsInternal('not.installed.app'))
def testGetApplicationPathsInternal_garbageOutputRaises(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.device.RunShellCommand(['pm', 'path', 'android'],
check_return=True),
['garbage first line'])):
with self.assertRaises(device_errors.CommandFailedError):
self.device._GetApplicationPathsInternal('android')
def testGetApplicationPathsInternal_outputWarningsIgnored(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.device.RunShellCommand(['pm', 'path', 'not.installed.app'],
check_return=True),
['WARNING: some warning message from pm'])):
self.assertEqual(
[], self.device._GetApplicationPathsInternal('not.installed.app'))
def testGetApplicationPathsInternal_fails(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.device.RunShellCommand(['pm', 'path', 'android'],
check_return=True),
self.CommandError('ERROR. Is package manager running?\n'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device._GetApplicationPathsInternal('android')
class DeviceUtils_GetApplicationVersionTest(DeviceUtilsTest):
def test_GetApplicationVersion_exists(self):
with self.assertCalls(
(self.call.adb.Shell('dumpsys package com.android.chrome'),
'Packages:\n'
' Package [com.android.chrome] (3901ecfb):\n'
' userId=1234 gids=[123, 456, 789]\n'
' pkg=Package{1fecf634 com.android.chrome}\n'
' versionName=45.0.1234.7\n')):
self.assertEqual('45.0.1234.7',
self.device.GetApplicationVersion('com.android.chrome'))
def test_GetApplicationVersion_notExists(self):
with self.assertCalls(
(self.call.adb.Shell('dumpsys package com.android.chrome'), '')):
self.assertEqual(None,
self.device.GetApplicationVersion('com.android.chrome'))
def test_GetApplicationVersion_fails(self):
with self.assertCalls(
(self.call.adb.Shell('dumpsys package com.android.chrome'),
'Packages:\n'
' Package [com.android.chrome] (3901ecfb):\n'
' userId=1234 gids=[123, 456, 789]\n'
' pkg=Package{1fecf634 com.android.chrome}\n')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetApplicationVersion('com.android.chrome')
class DeviceUtils_GetApplicationTargetSdkTest(DeviceUtilsTest):
def test_GetApplicationTargetSdk_exists(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('com.android.chrome'), True),
(self.call.device._GetDumpsysOutput(['package', 'com.android.chrome'],
'targetSdk='),
[' versionCode=413200001 minSdk=21 targetSdk=29'])):
self.assertEqual(
'29', self.device.GetApplicationTargetSdk('com.android.chrome'))
def test_GetApplicationTargetSdk_notExists(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('com.android.chrome'), False)):
self.assertIsNone(
self.device.GetApplicationTargetSdk('com.android.chrome'))
def test_GetApplicationTargetSdk_fails(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('com.android.chrome'), True),
(self.call.device._GetDumpsysOutput(['package', 'com.android.chrome'],
'targetSdk='), [])):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetApplicationTargetSdk('com.android.chrome')
def test_GetApplicationTargetSdk_prefinalizedSdk(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('com.android.chrome'), True),
(self.call.device._GetDumpsysOutput(['package', 'com.android.chrome'],
'targetSdk='),
[' versionCode=410301483 minSdk=10000 targetSdk=10000']),
(self.call.device.GetProp('ro.build.version.codename',
cache=True), 'R')):
self.assertEqual(
'R', self.device.GetApplicationTargetSdk('com.android.chrome'))
class DeviceUtils_GetUidForPackageTest(DeviceUtilsTest):
def test_GetUidForPackage_Exists(self):
with self.assertCall(
self.call.device._GetDumpsysOutput(
['package', 'com.android.chrome'], 'userId='),
[' userId=1001']):
self.assertEquals('1001',
self.device.GetUidForPackage('com.android.chrome'))
def test_GetUidForPackage_notInstalled(self):
with self.assertCall(
self.call.device._GetDumpsysOutput(
['package', 'com.android.chrome'], 'userId='),
['']):
self.assertEquals(None,
self.device.GetUidForPackage('com.android.chrome'))
def test_GetUidForPackage_fails(self):
with self.assertCall(
self.call.device._GetDumpsysOutput(
['package', 'com.android.chrome'], 'userId='),
[]):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetUidForPackage('com.android.chrome')
class DeviceUtils_GetPackageArchitectureTest(DeviceUtilsTest):
def test_GetPackageArchitecture_exists(self):
with self.assertCall(
self.call.device._RunPipedShellCommand(
'dumpsys package com.android.chrome | grep -F primaryCpuAbi'),
[' primaryCpuAbi=armeabi-v7a']):
self.assertEqual(abis.ARM,
self.device.GetPackageArchitecture('com.android.chrome'))
def test_GetPackageArchitecture_notExists(self):
with self.assertCall(
self.call.device._RunPipedShellCommand(
'dumpsys package com.android.chrome | grep -F primaryCpuAbi'), []):
self.assertEqual(None,
self.device.GetPackageArchitecture('com.android.chrome'))
class DeviceUtilsGetApplicationDataDirectoryTest(DeviceUtilsTest):
def testGetApplicationDataDirectory_exists(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('foo.bar.baz'), True),
(self.call.device._RunPipedShellCommand(
'pm dump foo.bar.baz | grep dataDir='),
['dataDir=/data/data/foo.bar.baz'])):
self.assertEqual('/data/data/foo.bar.baz',
self.device.GetApplicationDataDirectory('foo.bar.baz'))
def testGetApplicationDataDirectory_notInstalled(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('foo.bar.baz'), False)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetApplicationDataDirectory('foo.bar.baz')
def testGetApplicationDataDirectory_notExists(self):
with self.assertCalls(
(self.call.device.IsApplicationInstalled('foo.bar.baz'), True),
(self.call.device._RunPipedShellCommand(
'pm dump foo.bar.baz | grep dataDir='), self.ShellError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetApplicationDataDirectory('foo.bar.baz')
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsWaitUntilFullyBootedTest(DeviceUtilsTest):
def testWaitUntilFullyBooted_succeedsWithDefaults(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_succeedsWithWifi(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'),
'stuff\nWi-Fi is enabled\nmore stuff\n')):
self.device.WaitUntilFullyBooted(wifi=True, decrypt=False)
def testWaitUntilFullyBooted_succeedsWithDecryptFDE(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
# decryption_completed
(self.call.device.GetProp('vold.decrypt', cache=False),
'trigger_restart_framework')):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=True)
def testWaitUntilFullyBooted_succeedsWithDecryptNotFDE(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
# decryption_completed
(self.call.device.GetProp('vold.decrypt', cache=False), '')):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=True)
def testWaitUntilFullyBooted_deviceIsRock960(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), 'rk3399'),
(self.call.device.GetProp('sys.usb.config'), 'mtp,adb'),
(self.call.device.GetProp('ro.product.model'), 'rk3399'),
(self.call.device.GetProp('sys.usb.config'), 'adb'),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_deviceNotInitiallyAvailable(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.AdbCommandError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_deviceBrieflyOffline(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False),
self.AdbCommandError()),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1')):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_sdCardReadyFails_noPath(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_sdCardReadyFails_notExists(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_devicePmFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), self.CommandError()),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), self.CommandError()),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_bootFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '0'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '0'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=False)
def testWaitUntilFullyBooted_wifiFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=True, decrypt=False)
def testWaitUntilFullyBooted_decryptFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# is_device_connection_ready
(self.call.device.GetProp('ro.product.model'), ''),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device._GetApplicationPathsInternal(
'android', skip_cache=True), ['package:/some/fake/path']),
# boot_completed
(self.call.device.GetProp('sys.boot_completed', cache=False), '1'),
# decryption_completed
(self.call.device.GetProp('vold.decrypt', cache=False),
'trigger_restart_min_framework'),
# decryption_completed
(self.call.device.GetProp('vold.decrypt', cache=False),
'trigger_restart_min_framework'),
# decryption_completed
(self.call.device.GetProp('vold.decrypt', cache=False),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False, decrypt=True)
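
# The Reboot tests below patch out time.sleep so the polling done by
# Reboot(block=True) runs without real delays. A blocking reboot waits for the
# device to drop offline and then delegates to WaitUntilFullyBooted; the wifi
# and decrypt flags are forwarded to that wait, and root is re-enabled after
# the reboot when the device had root beforehand.
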
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsRebootTest(DeviceUtilsTest):
def testReboot_nonBlocking(self):
with self.assertCalls(self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False)):
self.device.Reboot(block=False)
def testReboot_blocking(self):
with self.assertCalls(
(self.call.device.HasRoot(), False),
self.call.adb.Reboot(), (self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=False, decrypt=False)):
self.device.Reboot(block=True)
def testReboot_blockingWithRoot(self):
with self.assertCalls(
(self.call.device.HasRoot(), True),
self.call.adb.Reboot(), (self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=False, decrypt=False),
self.call.device.EnableRoot()):
self.device.Reboot(block=True)
def testReboot_blockUntilWifi(self):
with self.assertCalls(
(self.call.device.HasRoot(), False),
self.call.adb.Reboot(), (self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=True, decrypt=False)):
self.device.Reboot(block=True, wifi=True, decrypt=False)
def testReboot_blockUntilDecrypt(self):
with self.assertCalls(
(self.call.device.HasRoot(), False),
self.call.adb.Reboot(), (self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=False, decrypt=True)):
self.device.Reboot(block=True, wifi=False, decrypt=True)
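
# DeviceUtils.Install tests: each case enumerates the exact adb/device calls
# expected for a given prior-install state. A 'flounder' product name disables
# streaming installs, pre-M devices (SDK < 23) skip permission granting, and an
# identical APK already on the device short-circuits to clearing application
# state and force-stopping instead of pushing the APK again.
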
class DeviceUtilsInstallTest(DeviceUtilsTest):
mock_apk = _MockApkHelper(TEST_APK_PATH, TEST_PACKAGE, ['p1'])
def testInstall_noPriorInstall(self):
with self.patch_call(
self.call.device.product_name,
return_value='notflounder'), (self.patch_call(
self.call.device.build_version_sdk, return_value=23)):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True),
(self.call.device.GrantPermissions(TEST_PACKAGE, ['p1']), [])):
self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
def testInstall_noStreaming(self):
with self.patch_call(
self.call.device.product_name,
return_value='flounder'), (self.patch_call(
self.call.device.build_version_sdk, return_value=23)):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=False,
allow_downgrade=False),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True),
(self.call.device.GrantPermissions(TEST_PACKAGE, ['p1']), [])):
self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
def testInstall_permissionsPreM(self):
with self.patch_call(
self.call.device.product_name,
return_value='notflounder'), (self.patch_call(
self.call.device.build_version_sdk, return_value=20)):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
(self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
def testInstall_findPermissions(self):
with self.patch_call(
self.call.device.product_name,
return_value='notflounder'), (self.patch_call(
self.call.device.build_version_sdk, return_value=23)):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
(self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True),
(self.call.device.GrantPermissions(TEST_PACKAGE, ['p1']), [])):
self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
def testInstall_passPermissions(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
(self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True),
(self.call.device.GrantPermissions(TEST_PACKAGE, ['p1', 'p2']), [])):
self.device.Install(
DeviceUtilsInstallTest.mock_apk,
retries=0,
permissions=['p1', 'p2'])
def testInstall_identicalPriorInstall(self):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/fake/data/app/test.package.apk']),
(self.call.device._ComputeStaleApks(TEST_PACKAGE, [TEST_APK_PATH]),
([], None)), (self.call.device.ClearApplicationState(TEST_PACKAGE)),
(self.call.device.ForceStop(TEST_PACKAGE)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(
DeviceUtilsInstallTest.mock_apk, retries=0, permissions=[])
def testInstall_differentPriorInstall(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/fake/data/app/test.package.apk']),
(self.call.device._ComputeStaleApks(TEST_PACKAGE, [TEST_APK_PATH]),
([TEST_APK_PATH], None)), self.call.device.Uninstall(TEST_PACKAGE),
self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(
DeviceUtilsInstallTest.mock_apk, retries=0, permissions=[])
def testInstall_differentPriorInstallSplitApk(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), [
'/fake/data/app/test.package.apk',
'/fake/data/app/test.package2.apk'
]), self.call.device.Uninstall(TEST_PACKAGE),
self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(
DeviceUtilsInstallTest.mock_apk, retries=0, permissions=[])
def testInstall_differentPriorInstall_reinstall(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/fake/data/app/test.package.apk']),
(self.call.device._ComputeStaleApks(TEST_PACKAGE, [TEST_APK_PATH]),
([TEST_APK_PATH], None)),
self.call.adb.Install(TEST_APK_PATH,
reinstall=True,
streaming=None,
allow_downgrade=False),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(
DeviceUtilsInstallTest.mock_apk,
reinstall=True,
retries=0,
permissions=[])
def testInstall_identicalPriorInstall_reinstall(self):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/fake/data/app/test.package.apk']),
(self.call.device._ComputeStaleApks(TEST_PACKAGE, [TEST_APK_PATH]),
([], None)), (self.call.device.ForceStop(TEST_PACKAGE)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(
DeviceUtilsInstallTest.mock_apk,
reinstall=True,
retries=0,
permissions=[])
def testInstall_missingApk(self):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), False)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
def testInstall_fails(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
(self.call.adb.Install(
TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False), self.CommandError('Failure\r\n'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device.Install(DeviceUtilsInstallTest.mock_apk, retries=0)
def testInstall_downgrade(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/fake/data/app/test.package.apk']),
(self.call.device._ComputeStaleApks(TEST_PACKAGE, [TEST_APK_PATH]),
([TEST_APK_PATH], None)),
self.call.adb.Install(TEST_APK_PATH,
reinstall=True,
streaming=None,
allow_downgrade=True),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.Install(
DeviceUtilsInstallTest.mock_apk,
reinstall=True,
retries=0,
permissions=[],
allow_downgrade=True)
def testInstall_pushesFakeModulesToDevice(self):
@contextlib.contextmanager
def mock_zip_temp_dir():
yield '/test/tmp/dir'
mock_apk_with_fake = _MockApkHelper(
TEST_APK_PATH, TEST_PACKAGE, splits=['fake1-master.apk'])
fake_modules = ['fake1']
with self.patch_call(
self.call.device.product_name,
return_value='notflounder'), (self.patch_call(
self.call.device.build_version_sdk, return_value=23)):
with self.assertCalls(
(mock.call.py_utils.tempfile_ext.NamedTemporaryDirectory(),
mock_zip_temp_dir),
self.call.device.RunShellCommand([
'rm', '-rf',
'/sdcard/Android/data/test.package/files/local_testing'
],
as_root=True),
(mock.call.os.rename('fake1-master.apk', '/test/tmp/dir/fake1.apk')),
(self.call.device.PushChangedFiles(
[('/test/tmp/dir', '/data/local/tmp/modules/test.package')],
delete_device_stale=True)),
self.call.device.RunShellCommand([
'mkdir', '-p',
'/sdcard/Android/data/test.package/files/local_testing'
],
as_root=True),
self.call.device.RunShellCommand(
'cp -a /data/local/tmp/modules/test.package/* ' +
'/sdcard/Android/data/test.package/files/local_testing/',
as_root=True,
shell=True),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True),
(self.call.device.GrantPermissions(TEST_PACKAGE, None), [])):
self.device.Install(
mock_apk_with_fake, fake_modules=fake_modules, retries=0)
def testInstall_packageNotAvailableAfterInstall(self):
with self.patch_call(
self.call.device.product_name,
return_value='notflounder'), (self.patch_call(
self.call.device.build_version_sdk, return_value=23)), (
self.patch_call(self.call.device.IsApplicationInstalled,
return_value=False)):
with self.assertCalls(
(self.call.device._FakeInstall(set(), None, 'test.package')),
(mock.call.os.path.exists(TEST_APK_PATH), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
self.call.adb.Install(TEST_APK_PATH,
reinstall=False,
streaming=None,
allow_downgrade=False)):
with six.assertRaisesRegex(
self, device_errors.CommandFailedError,
'not installed on device after explicit install attempt'):
self.device.Install(
DeviceUtilsInstallTest.mock_apk, retries=0)
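

# A minimal usage sketch of the Install API exercised above. Illustration only:
# `device` is assumed to be a connected DeviceUtils instance, `apk` an
# ApkHelper-style object, and the permission name is hypothetical.
def _example_install(device, apk):
  # reinstall=True installs over the existing package instead of uninstalling
  # it first; on M+ devices the listed runtime permissions are granted after
  # the install completes.
  device.Install(apk, reinstall=True, permissions=['p1'], retries=0,
                 allow_downgrade=False)


# The split-APK tests below follow the same pattern for InstallSplitApk: the
# base plus split APKs are checked on disk, SDK level 21+ is required, and a
# partial install (reinstalling only the stale splits) is issued when some
# splits are already present on the device.
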
class DeviceUtilsInstallSplitApkTest(DeviceUtilsTest):
mock_apk = _MockApkHelper('base.apk', TEST_PACKAGE, ['p1'],
['split1.apk', 'split2.apk'])
def testInstallSplitApk_noPriorInstall(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(mock.call.devil.android.apk_helper.ToSplitHelper(
'base.apk', ['split1.apk', 'split2.apk']),
DeviceUtilsInstallSplitApkTest.mock_apk),
(self.call.device._CheckSdkLevel(21)),
(mock.call.os.path.exists('base.apk'), True),
(mock.call.os.path.exists('split1.apk'), True),
(mock.call.os.path.exists('split2.apk'), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
(self.call.adb.InstallMultiple(
['base.apk', 'split1.apk', 'split2.apk'],
partial=None,
reinstall=False,
streaming=None,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.InstallSplitApk(
'base.apk', ['split1.apk', 'split2.apk'], permissions=[], retries=0)
def testInstallSplitApk_noStreaming(self):
with self.patch_call(
self.call.device.product_name, return_value='flounder'):
with self.assertCalls(
(mock.call.devil.android.apk_helper.ToSplitHelper(
'base.apk', ['split1.apk', 'split2.apk']),
DeviceUtilsInstallSplitApkTest.mock_apk),
(self.call.device._CheckSdkLevel(21)),
(mock.call.os.path.exists('base.apk'), True),
(mock.call.os.path.exists('split1.apk'), True),
(mock.call.os.path.exists('split2.apk'), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), []),
(self.call.adb.InstallMultiple(
['base.apk', 'split1.apk', 'split2.apk'],
partial=None,
reinstall=False,
streaming=False,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.InstallSplitApk(
'base.apk', ['split1.apk', 'split2.apk'], permissions=[], retries=0)
def testInstallSplitApk_partialInstall(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(mock.call.devil.android.apk_helper.ToSplitHelper(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk']),
DeviceUtilsInstallSplitApkTest.mock_apk),
(self.call.device._CheckSdkLevel(21)),
(mock.call.os.path.exists('base.apk'), True),
(mock.call.os.path.exists('split1.apk'), True),
(mock.call.os.path.exists('split2.apk'), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['base-on-device.apk', 'split2-on-device.apk']),
(self.call.device._ComputeStaleApks(
TEST_PACKAGE, ['base.apk', 'split1.apk', 'split2.apk']),
(['split2.apk'], None)),
(self.call.adb.InstallMultiple(['split2.apk'],
partial=TEST_PACKAGE,
reinstall=True,
streaming=None,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.InstallSplitApk(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk'],
reinstall=True,
permissions=[],
retries=0)
def testInstallSplitApk_downgrade(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(mock.call.devil.android.apk_helper.ToSplitHelper(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk']),
DeviceUtilsInstallSplitApkTest.mock_apk),
(self.call.device._CheckSdkLevel(21)),
(mock.call.os.path.exists('base.apk'), True),
(mock.call.os.path.exists('split1.apk'), True),
(mock.call.os.path.exists('split2.apk'), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['base-on-device.apk', 'split2-on-device.apk']),
(self.call.device._ComputeStaleApks(
TEST_PACKAGE, ['base.apk', 'split1.apk', 'split2.apk']),
(['split2.apk'], None)),
(self.call.adb.InstallMultiple(['split2.apk'],
partial=TEST_PACKAGE,
reinstall=True,
streaming=None,
allow_downgrade=True)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.InstallSplitApk(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk'],
reinstall=True,
permissions=[],
retries=0,
allow_downgrade=True)
def testInstallSplitApk_missingSplit(self):
with self.assertCalls(
(mock.call.devil.android.apk_helper.ToSplitHelper(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk']),
DeviceUtilsInstallSplitApkTest.mock_apk),
(self.call.device._CheckSdkLevel(21)),
(mock.call.os.path.exists('base.apk'), True),
(mock.call.os.path.exists('split1.apk'), True),
(mock.call.os.path.exists('split2.apk'), False)),\
self.assertRaises(device_errors.CommandFailedError):
self.device.InstallSplitApk(
DeviceUtilsInstallSplitApkTest.mock_apk, ['split1.apk', 'split2.apk'],
permissions=[],
retries=0)
def testInstallSplitApk_previouslyNonSplit(self):
with self.patch_call(
self.call.device.product_name, return_value='notflounder'):
with self.assertCalls(
(mock.call.devil.android.apk_helper.ToSplitHelper(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk']),
DeviceUtilsInstallSplitApkTest.mock_apk),
(self.call.device._CheckSdkLevel(21)),
(mock.call.os.path.exists('base.apk'), True),
(mock.call.os.path.exists('split1.apk'), True),
(mock.call.os.path.exists('split2.apk'), True),
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/fake/data/app/test.package.apk']),
self.call.device.Uninstall(TEST_PACKAGE),
(self.call.adb.InstallMultiple(
['base.apk', 'split1.apk', 'split2.apk'],
partial=None,
reinstall=False,
streaming=None,
allow_downgrade=False)),
(self.call.device.IsApplicationInstalled(TEST_PACKAGE, None), True)):
self.device.InstallSplitApk(
DeviceUtilsInstallSplitApkTest.mock_apk,
['split1.apk', 'split2.apk'],
permissions=[],
retries=0)
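
# Uninstall is a no-op when the package has no installed paths; otherwise the
# package name and the boolean second argument are forwarded unchanged to
# adb.Uninstall.
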
class DeviceUtilsUninstallTest(DeviceUtilsTest):
def testUninstall_callsThrough(self):
with self.assertCalls(
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE),
['/path.apk']), self.call.adb.Uninstall(TEST_PACKAGE, True)):
self.device.Uninstall(TEST_PACKAGE, True)
def testUninstall_noop(self):
with self.assertCalls(
(self.call.device._GetApplicationPathsInternal(TEST_PACKAGE), [])):
self.device.Uninstall(TEST_PACKAGE, True)
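
# _Su wraps a command for root execution: 'su -c <cmd>' before Marshmallow and
# 'su 0 <cmd>' on M and later.
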
class DeviceUtilsSuTest(DeviceUtilsTest):
def testSu_preM(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP_MR1):
self.assertEqual('su -c foo', self.device._Su('foo'))
def testSu_mAndAbove(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
self.assertEqual('su 0 foo', self.device._Su('foo'))
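
# RunShellCommand tests: NeedsSU is mocked to False in setUp so the plain
# command paths are exercised by default. The cases below cover list vs. string
# commands, env/cwd handling, quoting, run_as and as_root wrapping, huge
# commands pushed as a script file, single_line/raw_output/large_output modes,
# and check_return behaviour.
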
class DeviceUtilsRunShellCommandTest(DeviceUtilsTest):
def setUp(self):
super(DeviceUtilsRunShellCommandTest, self).setUp()
self.device.NeedsSU = mock.Mock(return_value=False)
def testRunShellCommand_commandAsList(self):
with self.assertCall(self.call.adb.Shell('pm list packages'), ''):
self.device.RunShellCommand(['pm', 'list', 'packages'], check_return=True)
def testRunShellCommand_commandAsListQuoted(self):
with self.assertCall(self.call.adb.Shell("echo 'hello world' '$10'"), ''):
self.device.RunShellCommand(['echo', 'hello world', '$10'],
check_return=True)
def testRunShellCommand_commandAsString(self):
with self.assertCall(self.call.adb.Shell('echo "$VAR"'), ''):
self.device.RunShellCommand('echo "$VAR"', shell=True, check_return=True)
def testNewRunShellImpl_withEnv(self):
with self.assertCall(
self.call.adb.Shell('VAR=some_string echo "$VAR"'), ''):
self.device.RunShellCommand(
'echo "$VAR"',
shell=True,
check_return=True,
env={'VAR': 'some_string'})
def testNewRunShellImpl_withEnvQuoted(self):
with self.assertCall(
self.call.adb.Shell('PATH="$PATH:/other/path" run_this'), ''):
self.device.RunShellCommand(['run_this'],
check_return=True,
env={'PATH': '$PATH:/other/path'})
def testNewRunShellImpl_withEnv_failure(self):
with self.assertRaises(KeyError):
self.device.RunShellCommand(['some_cmd'],
check_return=True,
env={'INVALID NAME': 'value'})
def testNewRunShellImpl_withCwd(self):
with self.assertCall(self.call.adb.Shell('cd /some/test/path && ls'), ''):
self.device.RunShellCommand(['ls'],
check_return=True,
cwd='/some/test/path')
def testNewRunShellImpl_withCwdQuoted(self):
with self.assertCall(
self.call.adb.Shell("cd '/some test/path with/spaces' && ls"), ''):
self.device.RunShellCommand(['ls'],
check_return=True,
cwd='/some test/path with/spaces')
def testRunShellCommand_withHugeCmd(self):
payload = 'hi! ' * 1024
expected_cmd = "echo '%s'" % payload
with self.assertCalls(
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh',
expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEqual([payload],
self.device.RunShellCommand(['echo', payload],
check_return=True))
def testRunShellCommand_withHugeCmdAndSu(self):
payload = 'hi! ' * 1024
expected_cmd_without_su = """sh -c 'echo '"'"'%s'"'"''""" % payload
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh',
expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEqual([payload],
self.device.RunShellCommand(['echo', payload],
check_return=True,
as_root=True))
def testRunShellCommand_withSu(self):
expected_cmd_without_su = "sh -c 'setprop service.adb.root 0'"
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(self.call.adb.Shell(expected_cmd), '')):
self.device.RunShellCommand(['setprop', 'service.adb.root', '0'],
check_return=True,
as_root=True)
def testRunShellCommand_withRunAs(self):
expected_cmd_without_run_as = "sh -c 'mkdir -p files'"
expected_cmd = (
'run-as org.devil.test_package %s' % expected_cmd_without_run_as)
with self.assertCall(self.call.adb.Shell(expected_cmd), ''):
self.device.RunShellCommand(['mkdir', '-p', 'files'],
check_return=True,
run_as='org.devil.test_package')
def testRunShellCommand_withRunAsAndSu(self):
expected_cmd_with_nothing = "sh -c 'mkdir -p files'"
expected_cmd_with_run_as = (
'run-as org.devil.test_package %s' % expected_cmd_with_nothing)
expected_cmd_without_su = (
'sh -c %s' % cmd_helper.SingleQuote(expected_cmd_with_run_as))
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(self.call.adb.Shell(expected_cmd), '')):
self.device.RunShellCommand(['mkdir', '-p', 'files'],
check_return=True,
run_as='org.devil.test_package',
as_root=True)
def testRunShellCommand_manyLines(self):
cmd = 'ls /some/path'
with self.assertCall(self.call.adb.Shell(cmd), 'file1\nfile2\nfile3\n'):
self.assertEqual(['file1', 'file2', 'file3'],
self.device.RunShellCommand(cmd.split(),
check_return=True))
def testRunShellCommand_manyLinesRawOutput(self):
cmd = 'ls /some/path'
with self.assertCall(self.call.adb.Shell(cmd), '\rfile1\nfile2\r\nfile3\n'):
self.assertEqual(
'\rfile1\nfile2\r\nfile3\n',
self.device.RunShellCommand(cmd.split(),
check_return=True,
raw_output=True))
def testRunShellCommand_singleLine_success(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), 'some value\n'):
self.assertEqual(
'some value',
self.device.RunShellCommand(cmd,
shell=True,
check_return=True,
single_line=True))
def testRunShellCommand_singleLine_successEmptyLine(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), '\n'):
self.assertEqual(
'',
self.device.RunShellCommand(cmd,
shell=True,
check_return=True,
single_line=True))
def testRunShellCommand_singleLine_successWithoutEndLine(self):
cmd = 'echo -n $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), 'some value'):
self.assertEqual(
'some value',
self.device.RunShellCommand(cmd,
shell=True,
check_return=True,
single_line=True))
def testRunShellCommand_singleLine_successNoOutput(self):
cmd = 'echo -n $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), ''):
self.assertEqual(
'',
self.device.RunShellCommand(cmd,
shell=True,
check_return=True,
single_line=True))
def testRunShellCommand_singleLine_failTooManyLines(self):
cmd = 'echo $VALUE'
with self.assertCall(
self.call.adb.Shell(cmd), 'some value\nanother value\n'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.RunShellCommand(
cmd, shell=True, check_return=True, single_line=True)
def testRunShellCommand_checkReturn_success(self):
cmd = 'echo $ANDROID_DATA'
output = '/data\n'
with self.assertCall(self.call.adb.Shell(cmd), output):
self.assertEqual([output.rstrip()],
self.device.RunShellCommand(cmd,
shell=True,
check_return=True))
def testRunShellCommand_checkReturn_failure(self):
cmd = 'ls /root'
output = 'opendir failed, Permission denied\n'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
with self.assertRaises(device_errors.AdbCommandFailedError):
self.device.RunShellCommand(cmd.split(), check_return=True)
def testRunShellCommand_checkReturn_disabled(self):
cmd = 'ls /root'
output = 'opendir failed, Permission denied\n'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
self.assertEqual([output.rstrip()],
self.device.RunShellCommand(cmd.split(),
check_return=False))
def testRunShellCommand_largeOutput_enabled(self):
cmd = 'echo $VALUE'
temp_file = MockTempFile('/sdcard/temp-123')
cmd_redirect = '( %s )>%s 2>&1' % (cmd, temp_file.name)
with self.assertCalls(
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb), temp_file), (self.call.adb.Shell(cmd_redirect)),
(self.call.device.ReadFile(
temp_file.name, force_pull=True, encoding='utf8'), 'something')):
self.assertEqual(['something'],
self.device.RunShellCommand(cmd,
shell=True,
large_output=True,
check_return=True))
def testRunShellCommand_largeOutput_disabledNoTrigger(self):
cmd = 'something'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError('')):
with self.assertRaises(device_errors.AdbCommandFailedError):
self.device.RunShellCommand([cmd], check_return=True)
def testRunShellCommand_largeOutput_disabledTrigger(self):
cmd = 'echo $VALUE'
temp_file = MockTempFile('/sdcard/temp-123')
cmd_redirect = '( %s )>%s 2>&1' % (cmd, temp_file.name)
with self.assertCalls(
(self.call.adb.Shell(cmd), self.ShellError('', None)),
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb), temp_file), (self.call.adb.Shell(cmd_redirect)),
(self.call.device.ReadFile(mock.ANY, force_pull=True,
encoding='utf8'), 'something')):
self.assertEqual(['something'],
self.device.RunShellCommand(cmd,
shell=True,
check_return=True))
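

# A minimal usage sketch of the RunShellCommand API exercised above;
# illustration only. `device` is assumed to be a connected DeviceUtils
# instance and the paths/variables are hypothetical.
def _example_run_shell_command(device):
  # List-form commands are quoted element by element; shell=True passes the
  # string to the device shell unmodified. check_return=True raises on a
  # non-zero exit status, and single_line=True returns a stripped string
  # instead of a list of lines.
  files = device.RunShellCommand(['ls', '/some/test/path'], check_return=True)
  value = device.RunShellCommand('echo "$VAR"', shell=True, check_return=True,
                                 env={'VAR': 'some_string'}, single_line=True)
  return files, value


# _RunPipedShellCommand appends an 'echo "PIPESTATUS: ..."' marker to the piped
# command and raises AdbShellCommandFailedError when any stage of the pipe
# reports a non-zero status or when the marker is cut off the output.
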
class DeviceUtilsRunPipedShellCommandTest(DeviceUtilsTest):
def testRunPipedShellCommand_success(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
shell=True,
check_return=True), ['This line contains foo', 'PIPESTATUS: 0 0']):
self.assertEqual(['This line contains foo'],
self.device._RunPipedShellCommand('ps | grep foo'))
def testRunPipedShellCommand_firstCommandFails(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
shell=True,
check_return=True), ['PIPESTATUS: 1 0']):
with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
self.device._RunPipedShellCommand('ps | grep foo')
self.assertEqual([1, 0], ec.exception.status)
def testRunPipedShellCommand_secondCommandFails(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
shell=True,
check_return=True), ['PIPESTATUS: 0 1']):
with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
self.device._RunPipedShellCommand('ps | grep foo')
self.assertEqual([0, 1], ec.exception.status)
def testRunPipedShellCommand_outputCutOff(self):
with self.assertCall(
self.call.device.RunShellCommand(
'ps | grep foo; echo "PIPESTATUS: ${PIPESTATUS[@]}"',
shell=True,
check_return=True), ['foo.bar'] * 256 + ['foo.ba']):
with self.assertRaises(device_errors.AdbShellCommandFailedError) as ec:
self.device._RunPipedShellCommand('ps | grep foo')
self.assertIs(None, ec.exception.status)
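
# KillAll tests: time.sleep is patched out so the blocking variant's polling of
# ListProcesses is instantaneous. KillAll raises when no process matches unless
# quiet=True, matches substrings unless exact=True, and sends SIGKILL unless
# another signum is given.
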
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsKillAllTest(DeviceUtilsTest):
def testKillAll_noMatchingProcessesFailure(self):
with self.assertCall(self.call.device.ListProcesses('test_process'), []):
with self.assertRaises(device_errors.CommandFailedError):
self.device.KillAll('test_process')
def testKillAll_noMatchingProcessesQuiet(self):
with self.assertCall(self.call.device.ListProcesses('test_process'), []):
self.assertEqual(0, self.device.KillAll('test_process', quiet=True))
def testKillAll_nonblocking(self):
with self.assertCalls((self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234),
('some.process.thing', 5678))),
(self.call.adb.Shell('kill -9 1234 5678'), '')):
self.assertEqual(2, self.device.KillAll('some.process', blocking=False))
def testKillAll_blocking(self):
with self.assertCalls(
(self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234), ('some.process.thing', 5678))),
(self.call.adb.Shell('kill -9 1234 5678'), ''),
(self.call.device.ListProcesses('some.process'),
Processes(('some.process.thing', 5678))),
(
self.call.device.ListProcesses('some.process'),
# Other instance with different pid.
Processes(('some.process', 111)))):
self.assertEqual(2, self.device.KillAll('some.process', blocking=True))
def testKillAll_exactNonblocking(self):
with self.assertCalls((self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234),
('some.process.thing', 5678))),
(self.call.adb.Shell('kill -9 1234'), '')):
self.assertEqual(
1, self.device.KillAll('some.process', exact=True, blocking=False))
def testKillAll_exactBlocking(self):
with self.assertCalls((self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234),
('some.process.thing', 5678))),
(self.call.adb.Shell('kill -9 1234'), ''),
(self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234),
('some.process.thing', 5678))),
(self.call.device.ListProcesses('some.process'),
Processes(('some.process.thing', 5678)))):
self.assertEqual(
1, self.device.KillAll('some.process', exact=True, blocking=True))
def testKillAll_root(self):
with self.assertCalls(
(self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234))), (self.call.device.NeedsSU(), True),
(self.call.device._Su("sh -c 'kill -9 1234'"),
"su -c sh -c 'kill -9 1234'"),
(self.call.adb.Shell("su -c sh -c 'kill -9 1234'"), '')):
self.assertEqual(1, self.device.KillAll('some.process', as_root=True))
def testKillAll_sigterm(self):
with self.assertCalls((self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234))),
(self.call.adb.Shell('kill -15 1234'), '')):
self.assertEqual(
1, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
def testKillAll_multipleInstances(self):
with self.assertCalls((self.call.device.ListProcesses('some.process'),
Processes(('some.process', 1234),
('some.process', 4567))),
(self.call.adb.Shell('kill -15 1234 4567'), '')):
self.assertEqual(
2, self.device.KillAll('some.process', signum=device_signal.SIGTERM))
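

# A minimal usage sketch of the KillAll API exercised above; the process name
# is hypothetical and `device` is assumed to be a connected DeviceUtils
# instance.
def _example_kill_all(device):
  # Returns the number of processes signalled; blocking=True waits until the
  # matched pids disappear from ListProcesses before returning, and quiet=True
  # suppresses the error raised when nothing matches.
  return device.KillAll('some.process', exact=True, blocking=True, quiet=True)


# StartActivity builds an 'am start' command from the intent: -W for blocking,
# -S for force-stop, -a/-c/-d/-n for action, categories, data and component,
# --es/--ez/--ei for string/bool/int extras, -f for flags, and --start-profiler
# for a trace file. An 'Error:' line in the am output is surfaced as
# CommandFailedError.
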
class DeviceUtilsStartActivityTest(DeviceUtilsTest):
def testStartActivity_actionOnly(self):
test_intent = intent.Intent(action='android.intent.action.VIEW')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_success(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_failure(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n test.package/.Main'),
'Error: Failed to start test activity'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.StartActivity(test_intent)
def testStartActivity_blocking(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-W '
'-a android.intent.action.VIEW '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent, blocking=True)
def testStartActivity_withCategory(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
category='android.intent.category.HOME')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-c android.intent.category.HOME '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withMultipleCategories(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
category=[
'android.intent.category.HOME', 'android.intent.category.BROWSABLE'
])
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-c android.intent.category.HOME '
'-c android.intent.category.BROWSABLE '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withData(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
data='http://www.google.com/')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-d http://www.google.com/ '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withStringExtra(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
extras={'foo': 'test'})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n test.package/.Main '
'--es foo test'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withBoolExtra(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
extras={'foo': True})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n test.package/.Main '
'--ez foo True'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withIntExtra(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
extras={'foo': 123})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n test.package/.Main '
'--ei foo 123'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withTraceFile(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'--start-profiler test_trace_file.out '
'-a android.intent.action.VIEW '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(
test_intent, trace_file_name='test_trace_file.out')
def testStartActivity_withForceStop(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-S '
'-a android.intent.action.VIEW '
'-n test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent, force_stop=True)
def testStartActivity_withFlags(self):
test_intent = intent.Intent(
action='android.intent.action.VIEW',
package=TEST_PACKAGE,
activity='.Main',
flags=[
intent.FLAG_ACTIVITY_NEW_TASK,
intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED
])
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n test.package/.Main '
'-f 0x10200000'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
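
# StartService issues 'am startservice' on Nougat and 'am start-service' on
# Oreo; --user is added when a user id is supplied, and an 'Error:' line in the
# output raises CommandFailedError.
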
class DeviceUtilsStartServiceTest(DeviceUtilsTest):
def testStartService_success(self):
test_intent = intent.Intent(
action='android.intent.action.START',
package=TEST_PACKAGE,
activity='.Main')
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.NOUGAT):
with self.assertCall(
self.call.adb.Shell('am startservice '
'-a android.intent.action.START '
'-n test.package/.Main'),
'Starting service: Intent { act=android.intent.action.START }'):
self.device.StartService(test_intent)
def testStartService_failure(self):
test_intent = intent.Intent(
action='android.intent.action.START',
package=TEST_PACKAGE,
activity='.Main')
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.NOUGAT):
with self.assertCall(
self.call.adb.Shell('am startservice '
'-a android.intent.action.START '
'-n test.package/.Main'),
'Error: Failed to start test service'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.StartService(test_intent)
def testStartService_withUser(self):
test_intent = intent.Intent(
action='android.intent.action.START',
package=TEST_PACKAGE,
activity='.Main')
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.NOUGAT):
with self.assertCall(
self.call.adb.Shell('am startservice '
'--user TestUser '
'-a android.intent.action.START '
'-n test.package/.Main'),
'Starting service: Intent { act=android.intent.action.START }'):
self.device.StartService(test_intent, user_id='TestUser')
def testStartService_onOreo(self):
test_intent = intent.Intent(
action='android.intent.action.START',
package=TEST_PACKAGE,
activity='.Main')
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.OREO):
with self.assertCall(
self.call.adb.Shell('am start-service '
'-a android.intent.action.START '
'-n test.package/.Main'),
'Starting service: Intent { act=android.intent.action.START }'):
self.device.StartService(test_intent)
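
# StartInstrumentation runs 'am instrument' through RunShellCommand with
# large_output=True; finish adds -w, raw adds -r, and extras become -e
# key/value pairs with the package name substituted via the "$p" shell
# variable.
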
class DeviceUtilsStartInstrumentationTest(DeviceUtilsTest):
def testStartInstrumentation_nothing(self):
with self.assertCalls(
self.call.device.RunShellCommand(
'p=test.package;am instrument "$p"/.TestInstrumentation',
shell=True,
check_return=True,
large_output=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False,
raw=False,
extras=None)
def testStartInstrumentation_finish(self):
with self.assertCalls((self.call.device.RunShellCommand(
'p=test.package;am instrument -w "$p"/.TestInstrumentation',
shell=True,
check_return=True,
large_output=True), ['OK (1 test)'])):
output = self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=True,
raw=False,
extras=None)
self.assertEqual(['OK (1 test)'], output)
def testStartInstrumentation_raw(self):
with self.assertCalls(
self.call.device.RunShellCommand(
'p=test.package;am instrument -r "$p"/.TestInstrumentation',
shell=True,
check_return=True,
large_output=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False,
raw=True,
extras=None)
def testStartInstrumentation_extras(self):
with self.assertCalls(
self.call.device.RunShellCommand(
'p=test.package;am instrument -e "$p".foo Foo -e bar \'Val \'"$p" '
'"$p"/.TestInstrumentation',
shell=True,
check_return=True,
large_output=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False,
raw=False,
extras={
'test.package.foo': 'Foo',
'bar': 'Val test.package'
})
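
# BroadcastIntent maps intent extras onto 'am broadcast' flags: string values
# use --es and a None value uses --esn.
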
class DeviceUtilsBroadcastIntentTest(DeviceUtilsTest):
def testBroadcastIntent_noExtras(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT')
with self.assertCall(
self.call.adb.Shell('am broadcast -a test.package.with.an.INTENT'),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
def testBroadcastIntent_withExtra(self):
test_intent = intent.Intent(
action='test.package.with.an.INTENT', extras={'foo': 'bar value'})
with self.assertCall(
self.call.adb.Shell(
"am broadcast -a test.package.with.an.INTENT --es foo 'bar value'"),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
def testBroadcastIntent_withExtra_noValue(self):
test_intent = intent.Intent(
action='test.package.with.an.INTENT', extras={'foo': None})
with self.assertCall(
self.call.adb.Shell(
'am broadcast -a test.package.with.an.INTENT --esn foo'),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
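
# GoHome inspects 'dumpsys activity activities' for a resumed launcher
# activity; if none is found it launches the HOME intent and, if focus is still
# not obtained, dismisses popups by sending keyevents 66 (enter) and 4 (back)
# before re-checking, retrying until the dumpsys call times out.
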
class DeviceUtilsGoHomeTest(DeviceUtilsTest):
def testGoHome_popupsExist(self):
with self.assertCalls(
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand([
'am', 'start', '-W', '-a', 'android.intent.action.MAIN', '-c',
'android.intent.category.HOME'
],
check_return=True),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'
''),
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(['input', 'keyevent', '66'],
check_return=True)),
(self.call.device.RunShellCommand(['input', 'keyevent', '4'],
check_return=True)),
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True),
['mResumedActivity Launcher'])):
self.device.GoHome()
def testGoHome_willRetry(self):
with self.assertCalls(
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand([
'am', 'start', '-W', '-a', 'android.intent.action.MAIN', '-c',
'android.intent.category.HOME'
],
check_return=True),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'
''),
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(
['input', 'keyevent', '66'],
check_return=True,
)), (self.call.device.RunShellCommand(['input', 'keyevent', '4'],
check_return=True)),
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand(['input', 'keyevent', '66'],
check_return=True)),
(self.call.device.RunShellCommand(['input', 'keyevent', '4'],
check_return=True)),
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.GoHome()
def testGoHome_alreadyFocused(self):
with self.assertCall(
self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True),
['mResumedActivity Launcher']):
self.device.GoHome()
def testGoHome_alreadyFocusedAlternateCase(self):
with self.assertCall(
self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True),
[' mResumedActivity .launcher/.']):
self.device.GoHome()
def testGoHome_obtainsFocusAfterGoingHome(self):
with self.assertCalls(
(self.call.device.RunShellCommand(['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), []),
(self.call.device.RunShellCommand([
'am', 'start', '-W', '-a', 'android.intent.action.MAIN', '-c',
'android.intent.category.HOME'
],
check_return=True),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'
''), (self.call.device.RunShellCommand(
['dumpsys', 'activity', 'activities'],
check_return=True,
large_output=True), ['mResumedActivity Launcher'])):
self.device.GoHome()
class DeviceUtilsForceStopTest(DeviceUtilsTest):
def testForceStop(self):
with self.assertCalls(
(self.call.device.GetApplicationPids(TEST_PACKAGE), [1111]),
(self.call.device.RunShellCommand(['am', 'force-stop', TEST_PACKAGE],
check_return=True), ['Success'])):
self.device.ForceStop(TEST_PACKAGE)
def testForceStop_NoProcessFound(self):
with self.assertCall(self.call.device.GetApplicationPids(TEST_PACKAGE), []):
self.device.ForceStop(TEST_PACKAGE)
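
# ClearApplicationState runs 'pm clear' only when the package is known to be
# installed (or unconditionally on JB MR2 / SDK 18+, where a missing package
# simply reports a failure) and optionally re-grants permissions afterwards.
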
class DeviceUtilsClearApplicationStateTest(DeviceUtilsTest):
def testClearApplicationState_setPermissions(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '17'),
(self.call.device._GetApplicationPathsInternal('this.package.exists'),
['/data/app/this.package.exists.apk']),
(self.call.device.RunShellCommand(
['pm', 'clear', 'this.package.exists'], check_return=True),
['Success']),
(self.call.device.GrantPermissions('this.package.exists', ['p1']), [])):
self.device.ClearApplicationState(
'this.package.exists', permissions=['p1'])
def testClearApplicationState_packageDoesntExist(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '11'),
(self.call.device._GetApplicationPathsInternal('does.not.exist'), [])):
self.device.ClearApplicationState('does.not.exist')
def testClearApplicationState_packageDoesntExistOnAndroidJBMR2OrAbove(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '18'),
(self.call.device.RunShellCommand(
['pm', 'clear', 'this.package.does.not.exist'], check_return=True),
['Failed'])):
self.device.ClearApplicationState('this.package.does.not.exist')
def testClearApplicationState_packageExists(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '17'),
(self.call.device._GetApplicationPathsInternal('this.package.exists'),
['/data/app/this.package.exists.apk']),
(self.call.device.RunShellCommand(
['pm', 'clear', 'this.package.exists'], check_return=True),
['Success'])):
self.device.ClearApplicationState('this.package.exists')
def testClearApplicationState_packageExistsOnAndroidJBMR2OrAbove(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '18'),
(self.call.device.RunShellCommand(
['pm', 'clear', 'this.package.exists'], check_return=True),
['Success'])):
self.device.ClearApplicationState('this.package.exists')
class DeviceUtilsSendKeyEventTest(DeviceUtilsTest):
def testSendKeyEvent(self):
with self.assertCall(self.call.adb.Shell('input keyevent 66'), ''):
self.device.SendKeyEvent(66)
class DeviceUtilsPushChangedFilesIndividuallyTest(DeviceUtilsTest):
def testPushChangedFilesIndividually_empty(self):
test_files = []
with self.assertCalls():
self.device._PushChangedFilesIndividually(test_files)
def testPushChangedFilesIndividually_single(self):
test_files = [('/test/host/path', '/test/device/path')]
with self.assertCalls(self.call.adb.Push(*test_files[0])):
self.device._PushChangedFilesIndividually(test_files)
def testPushChangedFilesIndividually_multiple(self):
test_files = [('/test/host/path/file1', '/test/device/path/file1'),
('/test/host/path/file2', '/test/device/path/file2')]
with self.assertCalls(
self.call.adb.Push(*test_files[0]), self.call.adb.Push(*test_files[1])):
self.device._PushChangedFilesIndividually(test_files)
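
# _PushChangedFilesZipped returns False when the on-device unzip helper cannot
# be installed; otherwise it zips the changed files on the host, pushes the
# archive to a device temp file, and unzips it (as root when su is needed),
# chmod-ing the target directories to 777 via a generated script run with
# 'source'.
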
class DeviceUtilsPushChangedFilesZippedTest(DeviceUtilsTest):
def testPushChangedFilesZipped_noUnzipCommand(self):
test_files = [('/test/host/path/file1', '/test/device/path/file1')]
with self.assertCalls((self.call.device._MaybeInstallCommands(), False)):
self.assertFalse(
self.device._PushChangedFilesZipped(test_files, ['/test/dir']))
def _testPushChangedFilesZipped_spec(self, test_files, test_dirs):
@contextlib.contextmanager
def mock_zip_temp_dir():
yield '/test/temp/dir'
expected_cmd = ''.join([
'\n /data/local/tmp/bin/unzip %s &&',
' (for dir in %s\n do\n chmod -R 777 "$dir" || exit 1\n',
' done)\n'
]) % ('/sdcard/foo123.zip', ' '.join(test_dirs))
with self.assertCalls(
(self.call.device._MaybeInstallCommands(), True),
(mock.call.py_utils.tempfile_ext.NamedTemporaryDirectory(),
mock_zip_temp_dir), (mock.call.devil.utils.zip_utils.WriteZipFile(
'/test/temp/dir/tmp.zip', test_files)),
(mock.call.os.path.getsize('/test/temp/dir/tmp.zip'), 123),
(self.call.device.NeedsSU(), True),
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.zip'), MockTempFile('/sdcard/foo123.zip')),
self.call.adb.Push('/test/temp/dir/tmp.zip', '/sdcard/foo123.zip'),
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device.WriteFile('/sdcard/temp-123.sh', expected_cmd),
(self.call.device.RunShellCommand(['source', '/sdcard/temp-123.sh'],
check_return=True,
as_root=True))):
self.assertTrue(
self.device._PushChangedFilesZipped(test_files, test_dirs))
def testPushChangedFilesZipped_single(self):
self._testPushChangedFilesZipped_spec(
[('/test/host/path/file1', '/test/device/path/file1')],
['/test/dir1'])
def testPushChangedFilesZipped_multiple(self):
self._testPushChangedFilesZipped_spec(
[('/test/host/path/file1', '/test/device/path/file1'),
('/test/host/path/file2', '/test/device/path/file2')],
['/test/dir1', '/test/dir2'])
class DeviceUtilsPathExistsTest(DeviceUtilsTest):
def testPathExists_pathExists(self):
with self.assertCall(
self.call.device.RunShellCommand(['test', '-e', '/path/file exists'],
as_root=False,
check_return=True,
timeout=10,
retries=0), []):
self.assertTrue(self.device.PathExists('/path/file exists'))
def testPathExists_multiplePathExists(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/path 1', '-a', '-e', '/path2'],
as_root=False,
check_return=True,
timeout=10,
retries=0), []):
self.assertTrue(self.device.PathExists(('/path 1', '/path2')))
def testPathExists_pathDoesntExist(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/path/file.not.exists'],
as_root=False,
check_return=True,
timeout=10,
retries=0), self.ShellError()):
self.assertFalse(self.device.PathExists('/path/file.not.exists'))
def testPathExists_asRoot(self):
with self.assertCall(
self.call.device.RunShellCommand(['test', '-e', '/root/path/exists'],
as_root=True,
check_return=True,
timeout=10,
retries=0), self.ShellError()):
self.assertFalse(
self.device.PathExists('/root/path/exists', as_root=True))
def testFileExists_pathDoesntExist(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/path/file.not.exists'],
as_root=False,
check_return=True,
timeout=10,
retries=0), self.ShellError()):
self.assertFalse(self.device.FileExists('/path/file.not.exists'))
class DeviceUtilsRemovePathTest(DeviceUtilsTest):
def testRemovePath_regular(self):
with self.assertCall(
self.call.device.RunShellCommand(['rm', 'some file'],
as_root=False,
check_return=True), []):
self.device.RemovePath('some file')
def testRemovePath_withForce(self):
with self.assertCall(
self.call.device.RunShellCommand(['rm', '-f', 'some file'],
as_root=False,
check_return=True), []):
self.device.RemovePath('some file', force=True)
def testRemovePath_recursively(self):
with self.assertCall(
self.call.device.RunShellCommand(['rm', '-r', '/remove/this/dir'],
as_root=False,
check_return=True), []):
self.device.RemovePath('/remove/this/dir', recursive=True)
def testRemovePath_withRoot(self):
with self.assertCall(
self.call.device.RunShellCommand(['rm', 'some file'],
as_root=True,
check_return=True), []):
self.device.RemovePath('some file', as_root=True)
def testRemovePath_manyPaths(self):
with self.assertCall(
self.call.device.RunShellCommand(['rm', 'eeny', 'meeny', 'miny', 'moe'],
as_root=False,
check_return=True), []):
self.device.RemovePath(['eeny', 'meeny', 'miny', 'moe'])
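
# PullFile normally pulls directly over adb; with as_root=True it first copies
# the file to a world-readable temp location using su, and it raises
# CommandFailedError when the remote path does not exist.
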
class DeviceUtilsPullFileTest(DeviceUtilsTest):
def testPullFile_existsOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCall(
self.call.adb.Pull('/data/app/test.file.exists',
'/test/file/host/path')):
self.device.PullFile('/data/app/test.file.exists',
'/test/file/host/path')
def testPullFile_doesntExistOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCall(
self.call.adb.Pull('/data/app/test.file.does.not.exist',
'/test/file/host/path'),
self.CommandError('remote object does not exist')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.PullFile('/data/app/test.file.does.not.exist',
'/test/file/host/path')
def testPullFile_asRoot(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCalls(
(self.call.device.NeedsSU(), True), (self.call.device.PathExists(
'/this/file/can.be.read.with.su', as_root=True), True),
(mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device.RunShellCommand(
'SRC=/this/file/can.be.read.with.su DEST=/sdcard/tmp/on.device;'
'cp "$SRC" "$DEST" && chmod 666 "$DEST"',
shell=True,
as_root=True,
check_return=True), (self.call.adb.Pull('/sdcard/tmp/on.device',
'/test/file/host/path'))):
self.device.PullFile(
'/this/file/can.be.read.with.su',
'/test/file/host/path',
as_root=True)
def testPullFile_asRootDoesntExistOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCalls(
(self.call.device.NeedsSU(), True), (self.call.device.PathExists(
'/data/app/test.file.does.not.exist', as_root=True), False)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.PullFile(
'/data/app/test.file.does.not.exist',
'/test/file/host/path',
as_root=True)
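# Tests for DeviceUtils.ReadFile and its _ReadFileWithPull helper. Small files
# are read with 'cat'; large files, force_pull requests, and files reported as
# zero-size go through a pull into a host temp directory. The encoding/errors
# helper below checks how pulled bytes are decoded.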
class DeviceUtilsReadFileTest(DeviceUtilsTest):
def testReadFileWithPull_success(self):
tmp_host_dir = '/tmp/dir/on.host/'
tmp_host = MockTempFile('/tmp/dir/on.host/tmp_ReadFileWithPull')
tmp_host.file.read.return_value = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY)),
(mock.call.__builtin__.open(mock.ANY, 'rb'), tmp_host) if six.PY2 else \
(mock.call.builtins.open(mock.ANY, 'rb'), tmp_host),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
self.assertEqual('some interesting contents',
self.device._ReadFileWithPull('/path/to/device/file'))
tmp_host.file.read.assert_called_once_with()
def _check_ReadFileWithEncodingErrors(self, encoding, errors):
tmp_host_dir = '/tmp/dir/on.host/'
tmp_host = MockTempFile('/tmp/dir/on.host/tmp_ReadFileWithEncodingErrors')
file_content = b'file with all ' + bytes(bytearray(range(256))) + b' bytes'
if six.PY2 or encoding is None:
expected_content = file_content
else:
expected_content = file_content.decode(encoding, errors)
      self.assertNotEqual(file_content, expected_content)
tmp_host.file.read.return_value = file_content
with self.assertCalls(
(mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY)),
(mock.call.__builtin__.open(mock.ANY, 'rb'), tmp_host) if six.PY2 else \
(mock.call.builtins.open(mock.ANY, 'rb'), tmp_host),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
self.assertEqual(expected_content,
self.device._ReadFileWithPull('/path/to/device/file',
encoding, errors))
tmp_host.file.read.assert_called_once_with()
def testReadFile_AsBytes(self):
self._check_ReadFileWithEncodingErrors(None, 'replace')
def testReadFile_NotUtf8_Replace(self):
self._check_ReadFileWithEncodingErrors('utf8', 'replace')
def testReadFile_NotUtf8_Ignore(self):
self._check_ReadFileWithEncodingErrors('utf8', 'ignore')
def testReadFile_NotCp1251_Replace(self):
self._check_ReadFileWithEncodingErrors('cp1251', 'replace')
def testReadFile_NotCp1251_Ignore(self):
self._check_ReadFileWithEncodingErrors('cp1251', 'ignore')
def testReadFileWithPull_rejected(self):
tmp_host_dir = '/tmp/dir/on.host/'
with self.assertCalls((mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY),
self.CommandError()),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
with self.assertRaises(device_errors.CommandFailedError):
self.device._ReadFileWithPull('/path/to/device/file')
def testReadFile_withSU_zeroSize(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device.FileSize(
'/this/file/has/zero/size', as_root=True), 0),
(mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device.RunShellCommand(
'SRC=/this/file/has/zero/size DEST=/sdcard/tmp/on.device;'
'cp "$SRC" "$DEST" && chmod 666 "$DEST"',
shell=True,
as_root=True,
check_return=True),
(self.call.device._ReadFileWithPull('/sdcard/tmp/on.device',
'utf8', 'replace'),
'but it has contents\n')):
self.assertEqual('but it has contents\n',
self.device.ReadFile('/this/file/has/zero/size',
as_root=True))
def testReadFile_withSU(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device.FileSize(
'/this/file/can.be.read.with.su', as_root=True), 256),
(self.call.device.RunShellCommand(
['cat', '/this/file/can.be.read.with.su'],
as_root=True,
check_return=True), ['this is a test file', 'read with su'])):
self.assertEqual(
'this is a test file\nread with su\n',
self.device.ReadFile('/this/file/can.be.read.with.su', as_root=True))
def testReadFile_withSU_doesNotExist(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device.FileSize('/this/file/does.not.exist', as_root=True),
self.CommandError('File does not exist'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device.ReadFile('/this/file/does.not.exist', as_root=True)
def testReadFile_withPull(self):
contents = 'a' * 123456
with self.assertCalls(
(self.call.device._ReadFileWithPull('/read/this/big/test/file',
'utf8', 'replace'),
contents)):
self.assertEqual(contents,
self.device.ReadFile('/read/this/big/test/file'))
def testReadFile_withPullAndSU(self):
contents = 'b' * 123456
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device.FileSize(
'/this/big/file/can.be.read.with.su', as_root=True), 123456),
(mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device.RunShellCommand(
'SRC=/this/big/file/can.be.read.with.su DEST=/sdcard/tmp/on.device;'
'cp "$SRC" "$DEST" && chmod 666 "$DEST"',
shell=True,
as_root=True,
check_return=True),
(self.call.device._ReadFileWithPull('/sdcard/tmp/on.device',
'utf8', 'replace'),
contents)):
self.assertEqual(
contents,
self.device.ReadFile(
'/this/big/file/can.be.read.with.su', as_root=True))
def testReadFile_forcePull(self):
contents = 'a' * 123456
with self.assertCall(
self.call.device._ReadFileWithPull('/read/this/big/test/file',
'utf8', 'replace'),
contents):
self.assertEqual(
contents,
self.device.ReadFile('/read/this/big/test/file', force_pull=True))
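# Tests for DeviceUtils.WriteFile and _WriteFileWithPush. Short contents are
# written with an 'echo -n ... >' shell command (wrapped in su when
# as_root=True); larger contents and force_push go through a host temp file
# and 'adb push', with an extra device temp file plus 'cp' for the su case.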
class DeviceUtilsWriteFileTest(DeviceUtilsTest):
def testWriteFileWithPush_success(self):
tmp_host = MockTempFile('/tmp/file/on.host')
contents = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(mode='w+'), tmp_host),
self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file')):
self.device._WriteFileWithPush('/path/to/device/file', contents)
tmp_host.file.write.assert_called_once_with(contents)
def testWriteFileWithPush_rejected(self):
tmp_host = MockTempFile('/tmp/file/on.host')
contents = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(mode='w+'), tmp_host),
(self.call.adb.Push('/tmp/file/on.host',
'/path/to/device/file'), self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device._WriteFileWithPush('/path/to/device/file', contents)
def testWriteFile_withPush(self):
contents = 'some large contents ' * 26 # 20 * 26 = 520 chars
with self.assertCalls(
self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
self.device.WriteFile('/path/to/device/file', contents)
def testWriteFile_withPushForced(self):
contents = 'tiny contents'
with self.assertCalls(
self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
self.device.WriteFile('/path/to/device/file', contents, force_push=True)
def testWriteFile_withPushAndSU(self):
contents = 'some large contents ' * 26 # 20 * 26 = 520 chars
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(mock.call.devil.android.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device._WriteFileWithPush('/sdcard/tmp/on.device', contents),
self.call.device.RunShellCommand(
['cp', '/sdcard/tmp/on.device', '/path/to/device/file'],
as_root=True,
check_return=True)):
self.device.WriteFile('/path/to/device/file', contents, as_root=True)
def testWriteFile_withEcho(self):
with self.assertCall(
self.call.adb.Shell("echo -n the.contents > /test/file/to.write"), ''):
self.device.WriteFile('/test/file/to.write', 'the.contents')
def testWriteFile_withEchoAndQuotes(self):
with self.assertCall(
self.call.adb.Shell("echo -n 'the contents' > '/test/file/to write'"),
''):
self.device.WriteFile('/test/file/to write', 'the contents')
def testWriteFile_withEchoAndSU(self):
expected_cmd_without_su = "sh -c 'echo -n contents > /test/file'"
expected_cmd = 'su -c %s' % expected_cmd_without_su
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.device._Su(expected_cmd_without_su), expected_cmd),
(self.call.adb.Shell(expected_cmd), '')):
self.device.WriteFile('/test/file', 'contents', as_root=True)
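# Tests for DeviceUtils.StatDirectory and ListDirectory, which parse the
# output of 'ls -a -l' (run with TZ=utc). EXAMPLE_LS_OUTPUT covers regular
# files, directories, symlinks, sockets, char/block devices, escaped spaces,
# missing st_nlink fields, and setuid/setgid/sticky permission bits.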
class DeviceUtilsStatDirectoryTest(DeviceUtilsTest):
# Note: Also tests ListDirectory in testStatDirectory_fileList.
EXAMPLE_LS_OUTPUT = [
'total 12345',
'drwxr-xr-x 19 root root 0 1970-04-06 18:03 .',
'drwxr-xr-x 19 root root 0 1970-04-06 18:03 ..',
'drwxr-xr-x 6 root root 1970-01-01 00:00 some_dir',
'-rw-r--r-- 1 root root 723 1971-01-01 07:04 some_file',
'-rw-r----- 1 root root 327 2009-02-13 23:30 My Music File',
# Some Android versions escape spaces in file names
'-rw-rw-rw- 1 root root 0 2018-01-11 13:35 Local\\ State',
# Older Android versions do not print st_nlink
'lrwxrwxrwx root root 1970-01-01 00:00 lnk -> /a/path',
'srwxrwx--- system system 2016-05-31 17:25 a_socket1',
'drwxrwxrwt system misc 1970-11-23 02:25 tmp',
'drwxr-s--- system shell 1970-11-23 02:24 my_cmd',
'cr--r----- root system 10, 183 1971-01-01 07:04 random',
'brw------- root root 7, 0 1971-01-01 07:04 block_dev',
'-rwS------ root shell 157404 2015-04-13 15:44 silly',
]
FILENAMES = [
'some_dir', 'some_file', 'My Music File', 'Local State', 'lnk',
'a_socket1', 'tmp', 'my_cmd', 'random', 'block_dev', 'silly'
]
def getStatEntries(self, path_given='/', path_listed='/'):
with self.assertCall(
self.call.device.RunShellCommand(['ls', '-a', '-l', path_listed],
check_return=True,
as_root=False,
env={'TZ': 'utc'}),
self.EXAMPLE_LS_OUTPUT):
entries = self.device.StatDirectory(path_given)
return {f['filename']: f for f in entries}
def getListEntries(self):
with self.assertCall(
self.call.device.RunShellCommand(['ls', '-a', '-l', '/'],
check_return=True,
as_root=False,
env={'TZ': 'utc'}),
self.EXAMPLE_LS_OUTPUT):
return self.device.ListDirectory('/')
def testStatDirectory_forceTrailingSlash(self):
self.getStatEntries(path_given='/foo/bar/', path_listed='/foo/bar/')
self.getStatEntries(path_given='/foo/bar', path_listed='/foo/bar/')
def testStatDirectory_fileList(self):
self.safeAssertItemsEqual(self.getStatEntries().keys(), self.FILENAMES)
self.safeAssertItemsEqual(self.getListEntries(), self.FILENAMES)
def testStatDirectory_fileModes(self):
expected_modes = (
('some_dir', stat.S_ISDIR),
('some_file', stat.S_ISREG),
('lnk', stat.S_ISLNK),
('a_socket1', stat.S_ISSOCK),
('block_dev', stat.S_ISBLK),
('random', stat.S_ISCHR),
)
entries = self.getStatEntries()
for filename, check in expected_modes:
self.assertTrue(check(entries[filename]['st_mode']))
def testStatDirectory_filePermissions(self):
should_have = (
('some_file', stat.S_IWUSR), # Owner can write.
('tmp', stat.S_IXOTH), # Others can execute.
('tmp', stat.S_ISVTX), # Has sticky bit.
('my_cmd', stat.S_ISGID), # Has set-group-ID bit.
('silly', stat.S_ISUID), # Has set UID bit.
)
should_not_have = (
('some_file', stat.S_IWOTH), # Others can't write.
('block_dev', stat.S_IRGRP), # Group can't read.
('silly', stat.S_IXUSR), # Owner can't execute.
)
entries = self.getStatEntries()
for filename, bit in should_have:
self.assertTrue(entries[filename]['st_mode'] & bit)
for filename, bit in should_not_have:
self.assertFalse(entries[filename]['st_mode'] & bit)
def testStatDirectory_numHardLinks(self):
entries = self.getStatEntries()
self.assertEqual(entries['some_dir']['st_nlink'], 6)
self.assertEqual(entries['some_file']['st_nlink'], 1)
self.assertFalse('st_nlink' in entries['tmp'])
def testStatDirectory_fileOwners(self):
entries = self.getStatEntries()
self.assertEqual(entries['some_dir']['st_owner'], 'root')
self.assertEqual(entries['my_cmd']['st_owner'], 'system')
self.assertEqual(entries['my_cmd']['st_group'], 'shell')
self.assertEqual(entries['tmp']['st_group'], 'misc')
def testStatDirectory_fileSize(self):
entries = self.getStatEntries()
self.assertEqual(entries['some_file']['st_size'], 723)
self.assertEqual(entries['My Music File']['st_size'], 327)
    # Sizes are sometimes not reported for non-regular files; don't try to
    # guess the size in those cases.
self.assertFalse('st_size' in entries['some_dir'])
def testStatDirectory_fileDateTime(self):
entries = self.getStatEntries()
self.assertEqual(entries['some_dir']['st_mtime'], 0) # Epoch!
self.assertEqual(entries['My Music File']['st_mtime'], 1234567800)
def testStatDirectory_deviceType(self):
entries = self.getStatEntries()
self.assertEqual(entries['random']['st_rdev_pair'], (10, 183))
self.assertEqual(entries['block_dev']['st_rdev_pair'], (7, 0))
def testStatDirectory_symbolicLinks(self):
entries = self.getStatEntries()
self.assertEqual(entries['lnk']['symbolic_link_to'], '/a/path')
for d in entries.values():
self.assertEqual('symbolic_link_to' in d, stat.S_ISLNK(d['st_mode']))
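# Tests for DeviceUtils.StatPath (and FileSize below): both are expected to
# stat a single entry by listing its parent directory via StatDirectory.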
class DeviceUtilsStatPathTest(DeviceUtilsTest):
EXAMPLE_DIRECTORY = [{
'filename': 'foo.txt',
'st_size': 123,
'st_time': 456
}, {
'filename': 'some_dir',
'st_time': 0
}]
INDEX = {e['filename']: e for e in EXAMPLE_DIRECTORY}
def testStatPath_file(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
self.assertEqual(self.INDEX['foo.txt'],
self.device.StatPath('/data/local/tmp/foo.txt'))
def testStatPath_directory(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
self.assertEqual(self.INDEX['some_dir'],
self.device.StatPath('/data/local/tmp/some_dir'))
def testStatPath_directoryWithTrailingSlash(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
self.assertEqual(self.INDEX['some_dir'],
self.device.StatPath('/data/local/tmp/some_dir/'))
def testStatPath_doesNotExist(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
with self.assertRaises(device_errors.CommandFailedError):
self.device.StatPath('/data/local/tmp/does.not.exist.txt')
class DeviceUtilsFileSizeTest(DeviceUtilsTest):
EXAMPLE_DIRECTORY = [{
'filename': 'foo.txt',
'st_size': 123,
'st_mtime': 456
}, {
'filename': 'some_dir',
'st_mtime': 0
}]
def testFileSize_file(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
self.assertEqual(123, self.device.FileSize('/data/local/tmp/foo.txt'))
def testFileSize_doesNotExist(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
with self.assertRaises(device_errors.CommandFailedError):
self.device.FileSize('/data/local/tmp/does.not.exist.txt')
def testFileSize_directoryWithNoSize(self):
with self.assertCall(
self.call.device.StatDirectory('/data/local/tmp', as_root=False),
self.EXAMPLE_DIRECTORY):
with self.assertRaises(device_errors.CommandFailedError):
self.device.FileSize('/data/local/tmp/some_dir')
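# Tests for DeviceUtils.SetJavaAsserts: it rewrites dalvik.vm.enableassertions
# in the local properties file and the corresponding system property, and
# returns True only when something actually changed.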
class DeviceUtilsSetJavaAssertsTest(DeviceUtilsTest):
def testSetJavaAsserts_enable(self):
with self.assertCalls(
(self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'),
self.call.device.WriteFile(
self.device.LOCAL_PROPERTIES_PATH,
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'
'dalvik.vm.enableassertions=all\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), ''),
self.call.device.SetProp('dalvik.vm.enableassertions', 'all')):
self.assertTrue(self.device.SetJavaAsserts(True))
def testSetJavaAsserts_disable(self):
with self.assertCalls(
(self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
self.call.device.WriteFile(
self.device.LOCAL_PROPERTIES_PATH,
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all'),
self.call.device.SetProp('dalvik.vm.enableassertions', '')):
self.assertTrue(self.device.SetJavaAsserts(False))
def testSetJavaAsserts_alreadyEnabled(self):
with self.assertCalls(
(self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
self.assertFalse(self.device.SetJavaAsserts(True))
def testSetJavaAsserts_malformedLocalProp(self):
with self.assertCalls(
(self.call.device.ReadFile(self.device.LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'malformed_property\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
self.assertFalse(self.device.SetJavaAsserts(True))
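# Tests for DeviceUtils._EnsureCacheInitialized: a getprop-based shell script
# is run once to populate the cache token; later calls are no-ops once the
# token is set, and a timeout leaves the cache uninitialized.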
class DeviceUtilsEnsureCacheInitializedTest(DeviceUtilsTest):
def testEnsureCacheInitialized_noCache_success(self):
self.assertIsNone(self.device._cache['token'])
with self.assertCall(
self.call.device.RunShellCommand(
AnyStringWith('getprop'),
shell=True,
check_return=True,
large_output=True), ['/sdcard', 'TOKEN']):
self.device._EnsureCacheInitialized()
self.assertIsNotNone(self.device._cache['token'])
def testEnsureCacheInitialized_noCache_failure(self):
self.assertIsNone(self.device._cache['token'])
with self.assertCall(
self.call.device.RunShellCommand(
AnyStringWith('getprop'),
shell=True,
check_return=True,
large_output=True), self.TimeoutError()):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device._EnsureCacheInitialized()
self.assertIsNone(self.device._cache['token'])
def testEnsureCacheInitialized_cache(self):
self.device._cache['token'] = 'TOKEN'
with self.assertCalls():
self.device._EnsureCacheInitialized()
self.assertIsNotNone(self.device._cache['token'])
class DeviceUtilsGetPropTest(DeviceUtilsTest):
def testGetProp_exists(self):
with self.assertCall(
self.call.device.RunShellCommand(['getprop', 'test.property'],
check_return=True,
single_line=True,
timeout=self.device._default_timeout,
retries=self.device._default_retries),
'property_value'):
self.assertEqual('property_value', self.device.GetProp('test.property'))
def testGetProp_doesNotExist(self):
with self.assertCall(
self.call.device.RunShellCommand(['getprop', 'property.does.not.exist'],
check_return=True,
single_line=True,
timeout=self.device._default_timeout,
retries=self.device._default_retries),
''):
self.assertEqual('', self.device.GetProp('property.does.not.exist'))
def testGetProp_cachedRoProp(self):
with self.assertCalls(
self.EnsureCacheInitialized(props=['[ro.build.type]: [userdebug]'])):
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type', cache=True))
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type', cache=True))
class DeviceUtilsSetPropTest(DeviceUtilsTest):
def testSetProp(self):
with self.assertCall(
self.call.device.RunShellCommand(
['setprop', 'test.property', 'test value'], check_return=True)):
self.device.SetProp('test.property', 'test value')
def testSetProp_check_succeeds(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['setprop', 'test.property', 'new_value'], check_return=True)),
(self.call.device.GetProp('test.property', cache=False), 'new_value')):
self.device.SetProp('test.property', 'new_value', check=True)
def testSetProp_check_fails(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['setprop', 'test.property', 'new_value'], check_return=True)),
(self.call.device.GetProp('test.property', cache=False), 'old_value')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.SetProp('test.property', 'new_value', check=True)
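# Tests for DeviceUtils.ListProcesses and the GetPids/GetApplicationPids
# wrappers: process listings come from 'ps | grep -F <name>' (or 'ps -e' on
# builds newer than Nougat MR1). ListProcesses matches substrings, while
# GetApplicationPids only accepts exact process-name matches.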
class DeviceUtilsListProcessesTest(DeviceUtilsTest):
def setUp(self):
super(DeviceUtilsListProcessesTest, self).setUp()
self.sample_output = [
'USER PID PPID VSIZE RSS WCHAN PC NAME',
'user 1001 100 1024 1024 ffffffff 00000000 one.match',
'user 1002 100 1024 1024 ffffffff 00000000 two.match',
'user 1003 101 1024 1024 ffffffff 00000000 three.match',
'user 1234 101 1024 1024 ffffffff 00000000 my$process',
'user 1236 100 1024 1024 ffffffff 00000000 foo',
'user 1578 1236 1024 1024 ffffffff 00000000 foo',
]
def _grepOutput(self, substring):
return [line for line in self.sample_output if substring in line]
def testListProcesses_sdkGreaterThanNougatMR1(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=(version_codes.NOUGAT_MR1 + 1)):
with self.patch_call(self.call.device.build_id, return_value='ZZZ99Z'):
with self.assertCall(
self.call.device._RunPipedShellCommand(
'ps -e | grep -F example.process'), []):
self.device.ListProcesses('example.process')
def testListProcesses_noMatches(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F does.not.match'),
self._grepOutput('does.not.match')):
self.assertEqual([], self.device.ListProcesses('does.not.match'))
def testListProcesses_oneMatch(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
self._grepOutput('one.match')):
self.assertEqual(
Processes(('one.match', 1001, 100)),
self.device.ListProcesses('one.match'))
def testListProcesses_multipleMatches(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F match'),
self._grepOutput('match')):
self.assertEqual(
Processes(('one.match', 1001, 100), ('two.match', 1002, 100),
('three.match', 1003, 101)),
self.device.ListProcesses('match'))
def testListProcesses_quotable(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand("ps | grep -F 'my$process'"),
self._grepOutput('my$process')):
self.assertEqual(
Processes(('my$process', 1234, 101)),
self.device.ListProcesses('my$process'))
# Tests for the GetPids wrapper interface.
def testGetPids_multipleInstances(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F foo'),
self._grepOutput('foo')):
self.assertEqual({'foo': ['1236', '1578']}, self.device.GetPids('foo'))
def testGetPids_allProcesses(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device.RunShellCommand(['ps'],
check_return=True,
large_output=True),
self.sample_output):
self.assertEqual({
'one.match': ['1001'],
'two.match': ['1002'],
'three.match': ['1003'],
'my$process': ['1234'],
'foo': ['1236', '1578']
}, self.device.GetPids())
# Tests for the GetApplicationPids wrapper interface.
def testGetApplicationPids_notFound(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F match'),
self._grepOutput('match')):
        # No PIDs found: the process name must be an exact match.
self.assertEqual([], self.device.GetApplicationPids('match'))
def testGetApplicationPids_foundOne(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
self._grepOutput('one.match')):
self.assertEqual([1001], self.device.GetApplicationPids('one.match'))
def testGetApplicationPids_foundMany(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F foo'),
self._grepOutput('foo')):
self.assertEqual([1236, 1578], self.device.GetApplicationPids('foo'))
def testGetApplicationPids_atMostOneNotFound(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F match'),
self._grepOutput('match')):
        # No PIDs found: the process name must be an exact match.
self.assertEqual(
None, self.device.GetApplicationPids('match', at_most_one=True))
def testGetApplicationPids_atMostOneFound(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F one.match'),
self._grepOutput('one.match')):
self.assertEqual(
1001, self.device.GetApplicationPids('one.match', at_most_one=True))
def testGetApplicationPids_atMostOneFoundTooMany(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertRaises(device_errors.CommandFailedError):
with self.assertCall(
self.call.device._RunPipedShellCommand('ps | grep -F foo'),
self._grepOutput('foo')):
self.device.GetApplicationPids('foo', at_most_one=True)
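# Tests for DeviceUtils.GetEnforce/SetEnforce: 'getenforce' output maps to
# True (Enforcing), False (Permissive), or None (Disabled), and SetEnforce
# accepts bools, ints, or strings when running 'setenforce 1|0'.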
class DeviceUtilsGetSetEnforce(DeviceUtilsTest):
def testGetEnforce_Enforcing(self):
with self.assertCall(self.call.adb.Shell('getenforce'), 'Enforcing'):
self.assertEqual(True, self.device.GetEnforce())
def testGetEnforce_Permissive(self):
with self.assertCall(self.call.adb.Shell('getenforce'), 'Permissive'):
self.assertEqual(False, self.device.GetEnforce())
def testGetEnforce_Disabled(self):
with self.assertCall(self.call.adb.Shell('getenforce'), 'Disabled'):
self.assertEqual(None, self.device.GetEnforce())
def testSetEnforce_Enforcing(self):
with self.assertCalls((self.call.device.NeedsSU(), False),
(self.call.adb.Shell('setenforce 1'), '')):
self.device.SetEnforce(enabled=True)
def testSetEnforce_Permissive(self):
with self.assertCalls((self.call.device.NeedsSU(), False),
(self.call.adb.Shell('setenforce 0'), '')):
self.device.SetEnforce(enabled=False)
def testSetEnforce_EnforcingWithInt(self):
with self.assertCalls((self.call.device.NeedsSU(), False),
(self.call.adb.Shell('setenforce 1'), '')):
self.device.SetEnforce(enabled=1)
def testSetEnforce_PermissiveWithInt(self):
with self.assertCalls((self.call.device.NeedsSU(), False),
(self.call.adb.Shell('setenforce 0'), '')):
self.device.SetEnforce(enabled=0)
def testSetEnforce_EnforcingWithStr(self):
with self.assertCalls((self.call.device.NeedsSU(), False),
(self.call.adb.Shell('setenforce 1'), '')):
self.device.SetEnforce(enabled='1')
def testSetEnforce_PermissiveWithStr(self):
with self.assertCalls((self.call.device.NeedsSU(), False),
(self.call.adb.Shell('setenforce 0'), '')):
self.device.SetEnforce(enabled='0') # Not recommended but it works!
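# Tests for DeviceUtils.GetWebViewUpdateServiceDump, which parses
# 'dumpsys webviewupdate' into fallback-logic state, the current WebView
# package, the minimum version code, and a per-package status dict. The
# no-op test shows the call does nothing on Nougat MR1.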
class DeviceUtilsGetWebViewUpdateServiceDumpTest(DeviceUtilsTest):
def testGetWebViewUpdateServiceDump_success(self):
    # A sample of the lines from 'adb shell dumpsys webviewupdate':
dumpsys_lines = [
'Fallback logic enabled: true',
('Current WebView package (name, version): '
'(com.android.chrome, 61.0.3163.98)'),
'Minimum WebView version code: 12345',
'WebView packages:',
('Valid package com.android.chrome (versionName: '
'61.0.3163.98, versionCode: 1, targetSdkVersion: 26) is '
'installed/enabled for all users'),
('Valid package com.google.android.webview (versionName: '
'58.0.3029.125, versionCode: 1, targetSdkVersion: 26) is NOT '
'installed/enabled for all users'),
('Invalid package com.google.android.apps.chrome (versionName: '
'56.0.2924.122, versionCode: 2, targetSdkVersion: 25), reason: SDK '
'version too low'),
('com.chrome.canary is NOT installed.'),
]
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.OREO):
with self.assertCall(
self.call.adb.Shell('dumpsys webviewupdate'),
'\n'.join(dumpsys_lines)):
update = self.device.GetWebViewUpdateServiceDump()
self.assertTrue(update['FallbackLogicEnabled'])
self.assertEqual('com.android.chrome', update['CurrentWebViewPackage'])
self.assertEqual(12345, update['MinimumWebViewVersionCode'])
# Order isn't really important, and we shouldn't have duplicates, so we
# convert to sets.
expected = {
'com.android.chrome', 'com.google.android.webview',
'com.google.android.apps.chrome', 'com.chrome.canary'
}
self.assertSetEqual(expected, set(update['WebViewPackages'].keys()))
self.assertEqual('is installed/enabled for all users',
update['WebViewPackages']['com.android.chrome'])
self.assertEqual(
'is NOT installed/enabled for all users',
update['WebViewPackages']['com.google.android.webview'])
self.assertEqual(
'reason: SDK version too low',
update['WebViewPackages']['com.google.android.apps.chrome'])
self.assertEqual('is NOT installed.',
update['WebViewPackages']['com.chrome.canary'])
def testGetWebViewUpdateServiceDump_missingkey(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.OREO):
with self.assertCall(
self.call.adb.Shell('dumpsys webviewupdate'),
'Fallback logic enabled: true'):
update = self.device.GetWebViewUpdateServiceDump()
self.assertEqual(True, update['FallbackLogicEnabled'])
def testGetWebViewUpdateServiceDump_noop(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.NOUGAT_MR1):
with self.assertCalls():
self.device.GetWebViewUpdateServiceDump()
def testGetWebViewUpdateServiceDump_noPackage(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.OREO):
with self.assertCall(
self.call.adb.Shell('dumpsys webviewupdate'),
'Fallback logic enabled: true\n'
'Current WebView package is null'):
update = self.device.GetWebViewUpdateServiceDump()
self.assertEqual(True, update['FallbackLogicEnabled'])
self.assertEqual(None, update['CurrentWebViewPackage'])
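# Tests for DeviceUtils.SetWebViewImplementation: the happy path shells out to
# 'cmd webviewupdate set-webview-implementation <package>'. On failure, the
# WebView update service dump is consulted to raise a CommandFailedError whose
# message explains why the package was rejected (not a provider, disabled,
# missing manifest tag, targetSdkVersion/versionCode too low, bad signature).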
class DeviceUtilsSetWebViewImplementationTest(DeviceUtilsTest):
def testSetWebViewImplementation_success(self):
with self.patch_call(
self.call.device.IsApplicationInstalled, return_value=True):
with self.assertCall(
self.call.adb.Shell(
'cmd webviewupdate set-webview-implementation foo.org'),
'Success'):
self.device.SetWebViewImplementation('foo.org')
def testSetWebViewImplementation_uninstalled(self):
with self.patch_call(
self.call.device.IsApplicationInstalled, return_value=False):
with self.assertRaises(device_errors.CommandFailedError) as cfe:
self.device.SetWebViewImplementation('foo.org')
self.assertIn('is not installed', cfe.exception.message)
def _testSetWebViewImplementationHelper(self, mock_dump_sys,
exception_message_substr):
with self.patch_call(
self.call.device.IsApplicationInstalled, return_value=True):
with self.assertCall(
self.call.adb.Shell(
'cmd webviewupdate set-webview-implementation foo.org'), 'Oops!'):
with self.patch_call(
self.call.device.GetWebViewUpdateServiceDump,
return_value=mock_dump_sys):
with self.assertRaises(device_errors.CommandFailedError) as cfe:
self.device.SetWebViewImplementation('foo.org')
self.assertIn(exception_message_substr, cfe.exception.message)
def testSetWebViewImplementation_notInProviderList(self):
mock_dump_sys = {
'WebViewPackages': {
'some.package': 'any reason',
'other.package': 'any reason',
}
}
self._testSetWebViewImplementationHelper(mock_dump_sys, 'provider list')
def testSetWebViewImplementation_notEnabled(self):
mock_dump_sys = {
'WebViewPackages': {
'foo.org': 'is NOT installed/enabled for all users',
}
}
self._testSetWebViewImplementationHelper(mock_dump_sys, 'is disabled')
def testSetWebViewImplementation_missingManifestTag(self):
mock_dump_sys = {
'WebViewPackages': {
'foo.org': 'No WebView-library manifest flag',
}
}
self._testSetWebViewImplementationHelper(mock_dump_sys,
'WebView native library')
def testSetWebViewImplementation_lowTargetSdkVersion_finalizedSdk(self):
mock_dump_sys = {'WebViewPackages': {'foo.org': 'SDK version too low', }}
with self.assertCalls(
(self.call.device.GetApplicationTargetSdk('foo.org'), '29'),
(self.call.device.GetProp('ro.build.version.preview_sdk'), '0')):
with self.patch_call(self.call.device.build_version_sdk, return_value=30):
self._testSetWebViewImplementationHelper(
mock_dump_sys,
"has targetSdkVersion '29', but valid WebView providers must "
"target >= 30 on this device")
def testSetWebViewImplementation_lowTargetSdkVersion_prefinalizedSdk(self):
mock_dump_sys = {'WebViewPackages': {'foo.org': 'SDK version too low', }}
with self.assertCalls(
(self.call.device.GetApplicationTargetSdk('foo.org'), '29'),
(self.call.device.GetProp('ro.build.version.preview_sdk'), '1'),
(self.call.device.GetProp('ro.build.version.codename'), 'R')):
with self.patch_call(self.call.device.build_version_sdk, return_value=29):
self._testSetWebViewImplementationHelper(
mock_dump_sys,
"targets a finalized SDK ('29'), but valid WebView providers must "
"target a pre-finalized SDK ('R') on this device")
def testSetWebViewImplementation_lowVersionCode(self):
mock_dump_sys = {
'MinimumWebViewVersionCode': 12345,
'WebViewPackages': {
'foo.org': 'Version code too low',
}
}
self._testSetWebViewImplementationHelper(mock_dump_sys,
'higher versionCode')
def testSetWebViewImplementation_invalidSignature(self):
mock_dump_sys = {'WebViewPackages': {'foo.org': 'Incorrect signature'}}
self._testSetWebViewImplementationHelper(mock_dump_sys,
'signed with release keys')
class DeviceUtilsSetWebViewFallbackLogicTest(DeviceUtilsTest):
def testSetWebViewFallbackLogic_False_success(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.NOUGAT):
with self.assertCall(
self.call.adb.Shell('cmd webviewupdate enable-redundant-packages'),
'Success'):
self.device.SetWebViewFallbackLogic(False)
def testSetWebViewFallbackLogic_True_success(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.NOUGAT):
with self.assertCall(
self.call.adb.Shell('cmd webviewupdate disable-redundant-packages'),
'Success'):
self.device.SetWebViewFallbackLogic(True)
def testSetWebViewFallbackLogic_failure(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.NOUGAT):
with self.assertCall(
self.call.adb.Shell('cmd webviewupdate enable-redundant-packages'),
'Oops!'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.SetWebViewFallbackLogic(False)
def testSetWebViewFallbackLogic_beforeNougat(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
with self.assertCalls():
self.device.SetWebViewFallbackLogic(False)
def testSetWebViewFallbackLogic_afterPie(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.Q):
with self.assertCalls():
self.device.SetWebViewFallbackLogic(False)
class DeviceUtilsTakeScreenshotTest(DeviceUtilsTest):
def testTakeScreenshot_fileNameProvided(self):
with self.assertCalls(
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.png'), MockTempFile('/tmp/path/temp-123.png')),
(self.call.adb.Shell('/system/bin/screencap -p /tmp/path/temp-123.png'),
''),
self.call.device.PullFile('/tmp/path/temp-123.png',
'/test/host/screenshot.png')):
self.device.TakeScreenshot('/test/host/screenshot.png')
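# Tests for DeviceUtils.DismissCrashDialogIfNeeded: the crashed package is
# scraped from the 'Application Error:' window in 'dumpsys window windows'.
# Pre-N builds dismiss the dialog with key events; N and later broadcast
# android.intent.action.CLOSE_SYSTEM_DIALOGS instead.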
class DeviceUtilsDismissCrashDialogIfNeededTest(DeviceUtilsTest):
def testDismissCrashDialogIfNeeded_crashedPackageNotFound(self):
sample_dumpsys_output = '''
WINDOW MANAGER WINDOWS (dumpsys window windows)
Window #11 Window{f8b647a u0 SearchPanel}:
mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
mOwnerUid=100 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
Requested w=1080 h=1920 mLayoutSeq=426
mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
'''
with self.assertCalls(
(self.call.device.RunShellCommand(['dumpsys', 'window', 'windows'],
check_return=True,
large_output=True),
sample_dumpsys_output.split('\n'))):
package_name = self.device.DismissCrashDialogIfNeeded()
self.assertIsNone(package_name)
def testDismissCrashDialogIfNeeded_crashedPackageFound_sdk_preN(self):
sample_dumpsys_output = '''
WINDOW MANAGER WINDOWS (dumpsys window windows)
Window #11 Window{f8b647a u0 SearchPanel}:
mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
mOwnerUid=102 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
Requested w=1080 h=1920 mLayoutSeq=426
mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
mHasPermanentDpad=false
mCurrentFocus=Window{3a27740f u0 Application Error: com.android.chrome}
mFocusedApp=AppWindowToken{470af6f token=Token{272ec24e ActivityRecord{t894}}}
'''
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'],
check_return=True,
large_output=True), sample_dumpsys_output.split('\n')),
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '23'),
(self.call.device.RunShellCommand(['input', 'keyevent', '22'],
check_return=True)),
(self.call.device.RunShellCommand(['input', 'keyevent', '22'],
check_return=True)),
(self.call.device.RunShellCommand(['input', 'keyevent', '66'],
check_return=True)),
(self.call.device.RunShellCommand(['dumpsys', 'window', 'windows'],
check_return=True,
large_output=True), [])):
package_name = self.device.DismissCrashDialogIfNeeded()
self.assertEqual(package_name, 'com.android.chrome')
def testDismissCrashDialogIfNeeded_crashedPackageFound_sdk_N(self):
sample_dumpsys_output = '''
WINDOW MANAGER WINDOWS (dumpsys window windows)
Window #11 Window{f8b647a u0 SearchPanel}:
mDisplayId=0 mSession=Session{8 94:122} mClient=android.os.BinderProxy@1ba5
mOwnerUid=102 mShowToOwnerOnly=false package=com.android.systemui appop=NONE
mAttrs=WM.LayoutParams{(0,0)(fillxfill) gr=#53 sim=#31 ty=2024 fl=100
Requested w=1080 h=1920 mLayoutSeq=426
mBaseLayer=211000 mSubLayer=0 mAnimLayer=211000+0=211000 mLastLayer=211000
mHasPermanentDpad=false
mCurrentFocus=Window{3a27740f u0 Application Error: com.android.chrome}
mFocusedApp=AppWindowToken{470af6f token=Token{272ec24e ActivityRecord{t894}}}
'''
with self.assertCalls(
(self.call.device.RunShellCommand(
['dumpsys', 'window', 'windows'],
check_return=True,
large_output=True), sample_dumpsys_output.split('\n')),
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '25'),
(self.call.device.RunShellCommand([
'am', 'broadcast', '-a',
'android.intent.action.CLOSE_SYSTEM_DIALOGS'
],
check_return=True)),
(self.call.device.RunShellCommand(['dumpsys', 'window', 'windows'],
check_return=True,
large_output=True), [])):
package_name = self.device.DismissCrashDialogIfNeeded()
self.assertEqual(package_name, 'com.android.chrome')
class DeviceUtilsClientCache(DeviceUtilsTest):
def testClientCache_twoCaches(self):
self.device._cache['test'] = 0
client_cache_one = self.device.GetClientCache('ClientOne')
client_cache_one['test'] = 1
client_cache_two = self.device.GetClientCache('ClientTwo')
client_cache_two['test'] = 2
self.assertEqual(self.device._cache['test'], 0)
self.assertEqual(client_cache_one, {'test': 1})
self.assertEqual(client_cache_two, {'test': 2})
self.device.ClearCache()
self.assertTrue('test' not in self.device._cache)
self.assertEqual(client_cache_one, {})
self.assertEqual(client_cache_two, {})
def testClientCache_multipleInstances(self):
client_cache_one = self.device.GetClientCache('ClientOne')
client_cache_one['test'] = 1
client_cache_two = self.device.GetClientCache('ClientOne')
self.assertEqual(client_cache_one, {'test': 1})
self.assertEqual(client_cache_two, {'test': 1})
self.device.ClearCache()
self.assertEqual(client_cache_one, {})
self.assertEqual(client_cache_two, {})
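# Tests for DeviceUtils.HealthyDevices: devices come from 'adb devices' and
# are filtered by denylist and supported ABIs; ANDROID_SERIAL or an explicit
# device_arg skips enumeration entirely. When nothing is attached, retries
# restart the adb server with exponential backoff (2, 4, 8, 16 seconds) and
# can optionally reset the USB devices first.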
class DeviceUtilsHealthyDevicesTest(mock_calls.TestCase):
def testHealthyDevices_emptyDenylist_defaultDeviceArg(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM])):
denylist = mock.NonCallableMock(**{'Read.return_value': []})
devices = device_utils.DeviceUtils.HealthyDevices(denylist)
for serial, device in zip(test_serials, devices):
self.assertTrue(isinstance(device, device_utils.DeviceUtils))
self.assertEqual(serial, device.adb.GetDeviceSerial())
def testHealthyDevices_denylist_defaultDeviceArg(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM])):
denylist = mock.NonCallableMock(
**{'Read.return_value': ['fedcba9876543210']})
devices = device_utils.DeviceUtils.HealthyDevices(denylist)
self.assertEqual(1, len(devices))
self.assertTrue(isinstance(devices[0], device_utils.DeviceUtils))
self.assertEqual('0123456789abcdef', devices[0].adb.GetDeviceSerial())
def testHealthyDevices_noneDeviceArg_multiple_attached(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM]),
(mock.call.devil.android.device_errors.MultipleDevicesError(mock.ANY),
_MockMultipleDevicesError())):
with self.assertRaises(_MockMultipleDevicesError):
device_utils.DeviceUtils.HealthyDevices(device_arg=None)
def testHealthyDevices_noneDeviceArg_one_attached(self):
test_serials = ['0123456789abcdef']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM])):
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=None)
self.assertEqual(1, len(devices))
def testHealthyDevices_noneDeviceArg_one_attached_old_props(self):
test_serials = ['0123456789abcdef']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[]),
(mock.call.devil.android.device_utils.DeviceUtils.GetABI(), [abis.ARM
])):
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=None)
self.assertEqual(1, len(devices))
def testHealthyDevices_noneDeviceArg_one_attached_multi_abi(self):
test_serials = ['0123456789abcdef']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM, abis.X86])):
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=None,
abis=[abis.X86])
self.assertEqual(1, len(devices))
def testHealthyDevices_noneDeviceArg_no_attached(self):
test_serials = []
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials])):
with self.assertRaises(device_errors.NoDevicesError):
device_utils.DeviceUtils.HealthyDevices(device_arg=None, retries=0)
def testHealthyDevices_noneDeviceArg_multiple_attached_ANDROID_SERIAL(self):
try:
os.environ['ANDROID_SERIAL'] = '0123456789abcdef'
with self.assertCalls(): # Should skip adb devices when device is known.
device_utils.DeviceUtils.HealthyDevices(device_arg=None)
finally:
del os.environ['ANDROID_SERIAL']
def testHealthyDevices_stringDeviceArg(self):
with self.assertCalls(): # Should skip adb devices when device is known.
devices = device_utils.DeviceUtils.HealthyDevices(
device_arg='0123456789abcdef')
self.assertEqual(1, len(devices))
def testHealthyDevices_EmptyListDeviceArg_multiple_attached(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM])):
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=())
self.assertEqual(2, len(devices))
def testHealthyDevices_EmptyListDeviceArg_multiple_attached_multi_abi(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.X86]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM, abis.X86])):
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=(),
abis=[abis.X86])
self.assertEqual(2, len(devices))
def testHealthyDevices_EmptyListDeviceArg_ANDROID_SERIAL(self):
try:
os.environ['ANDROID_SERIAL'] = '0123456789abcdef'
with self.assertCalls(): # Should skip adb devices when device is known.
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=())
finally:
del os.environ['ANDROID_SERIAL']
self.assertEqual(1, len(devices))
def testHealthyDevices_EmptyListDeviceArg_no_attached(self):
test_serials = []
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials])):
with self.assertRaises(device_errors.NoDevicesError):
device_utils.DeviceUtils.HealthyDevices(device_arg=[], retries=0)
@mock.patch('time.sleep')
@mock.patch('devil.android.sdk.adb_wrapper.RestartServer')
def testHealthyDevices_EmptyListDeviceArg_no_attached_with_retry(
self, mock_restart, mock_sleep):
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), [])):
with self.assertRaises(device_errors.NoDevicesError):
device_utils.DeviceUtils.HealthyDevices(device_arg=[], retries=4)
self.assertEqual(mock_restart.call_count, 4)
self.assertEqual(
mock_sleep.call_args_list,
[mock.call(2), mock.call(4),
mock.call(8), mock.call(16)])
@mock.patch('time.sleep')
@mock.patch('devil.android.sdk.adb_wrapper.RestartServer')
def testHealthyDevices_EmptyListDeviceArg_no_attached_with_resets(
self, mock_restart, mock_sleep):
    # The reset_usb import fails on Windows. Mock the full import here so it
    # can succeed as it would on Linux.
mock_reset_import = mock.MagicMock()
sys.modules['devil.utils.reset_usb'] = mock_reset_import
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), []),
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(), [])):
with self.assertRaises(device_errors.NoDevicesError):
with mock.patch.object(mock_reset_import,
'reset_all_android_devices') as mock_reset:
device_utils.DeviceUtils.HealthyDevices(
device_arg=[], retries=4, enable_usb_resets=True)
self.assertEqual(mock_reset.call_count, 1)
self.assertEqual(mock_restart.call_count, 4)
self.assertEqual(
mock_sleep.call_args_list,
[mock.call(2), mock.call(4),
mock.call(8), mock.call(16)])
def testHealthyDevices_ListDeviceArg(self):
device_arg = ['0123456789abcdef', 'fedcba9876543210']
try:
os.environ['ANDROID_SERIAL'] = 'should-not-apply'
with self.assertCalls(): # Should skip adb devices when device is known.
devices = device_utils.DeviceUtils.HealthyDevices(device_arg=device_arg)
finally:
del os.environ['ANDROID_SERIAL']
self.assertEqual(2, len(devices))
def testHealthyDevices_abisArg_no_matching_abi(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM])):
with self.assertRaises(device_errors.NoDevicesError):
device_utils.DeviceUtils.HealthyDevices(
device_arg=[], retries=0, abis=[abis.ARM_64])
def testHealthyDevices_abisArg_filter_on_abi(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCalls(
(mock.call.devil.android.sdk.adb_wrapper.AdbWrapper.Devices(),
[_AdbWrapperMock(s) for s in test_serials]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM_64]),
(mock.call.devil.android.device_utils.DeviceUtils.GetSupportedABIs(),
[abis.ARM])):
devices = device_utils.DeviceUtils.HealthyDevices(
device_arg=[], retries=0, abis=[abis.ARM_64])
self.assertEqual(1, len(devices))
class DeviceUtilsRestartAdbdTest(DeviceUtilsTest):
def testAdbdRestart(self):
mock_temp_file = '/sdcard/temp-123.sh'
with self.assertCalls(
(mock.call.devil.android.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile(mock_temp_file)),
self.call.device.WriteFile(mock.ANY, mock.ANY),
(self.call.device.RunShellCommand(
['source', mock_temp_file], check_return=True, as_root=True)),
self.call.adb.WaitForDevice()):
self.device.RestartAdbd()
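# Tests for DeviceUtils.GrantPermissions. _PmGrantShellCall reproduces the
# expected on-device shell script (matched only by its 'p=<pkg>;for q in ...'
# prefix, presumably a loop of 'pm grant' calls) and the separator-delimited
# per-permission results that GrantPermissions parses.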
class DeviceUtilsGrantPermissionsTest(DeviceUtilsTest):
def _PmGrantShellCall(self, package, permissions):
fragment = 'p=%s;for q in %s;' % (package, ' '.join(sorted(permissions)))
results = []
for permission, result in sorted(permissions.items()):
if result:
output, status = result + '\n', 1
else:
output, status = '', 0
results.append('{output}{sep}{permission}{sep}{status}{sep}\n'.format(
output=output,
permission=permission,
status=status,
sep=device_utils._SHELL_OUTPUT_SEPARATOR))
return (self.call.device.RunShellCommand(
AnyStringWith(fragment),
shell=True,
raw_output=True,
large_output=True,
check_return=True), ''.join(results))
def testGrantPermissions_none(self):
self.device.GrantPermissions('package', [])
def testGrantPermissions_one(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
with self.assertCalls(self._PmGrantShellCall('package', {'p1': 0})):
self.device.GrantPermissions('package', ['p1'])
def testGrantPermissions_multiple(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
with self.assertCalls(
self._PmGrantShellCall('package', {
'p1': 0,
'p2': 0
})):
self.device.GrantPermissions('package', ['p1', 'p2'])
  def testGrantPermissions_WriteExternalStorage(self):
WRITE = 'android.permission.WRITE_EXTERNAL_STORAGE'
READ = 'android.permission.READ_EXTERNAL_STORAGE'
with PatchLogger() as logger:
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
with self.assertCalls(
self._PmGrantShellCall('package', {
READ: 0,
WRITE: 0
})):
self.device.GrantPermissions('package', [WRITE])
self.assertEqual(logger.warnings, [])
  def testGrantPermissions_ManageExternalStorage(self):
with PatchLogger() as logger:
with self.patch_call(self.call.device.build_version_sdk,
return_value=version_codes.R):
with self.assertCalls(
(self.call.device.RunShellCommand(
AnyStringWith('appops set pkg MANAGE_EXTERNAL_STORAGE allow'),
shell=True,
raw_output=True,
large_output=True,
check_return=True),
'{sep}MANAGE_EXTERNAL_STORAGE{sep}0{sep}\n'.format(
sep=device_utils._SHELL_OUTPUT_SEPARATOR))):
self.device.GrantPermissions(
'pkg', ['android.permission.MANAGE_EXTERNAL_STORAGE'])
self.assertEqual(logger.warnings, [])
def testGrantPermissions_DenyList(self):
with PatchLogger() as logger:
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
with self.assertCalls(self._PmGrantShellCall('package', {'p1': 0})):
self.device.GrantPermissions('package',
['p1', 'foo.permission.C2D_MESSAGE'])
self.assertEqual(logger.warnings, [])
  def testGrantPermissions_unchangeablePermission(self):
error_message = (
'Operation not allowed: java.lang.SecurityException: '
'Permission UNCHANGEABLE is not a changeable permission type')
with PatchLogger() as logger:
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.MARSHMALLOW):
with self.assertCalls(
self._PmGrantShellCall('package', {'UNCHANGEABLE': error_message})):
self.device.GrantPermissions('package', ['UNCHANGEABLE'])
self.assertEqual(logger.warnings,
[mock.ANY, AnyStringWith('UNCHANGEABLE')])
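# Tests for DeviceUtils.IsScreenOn: screen state is read by grepping
# 'dumpsys input_method' for mScreenOn (pre-Lollipop) or mInteractive
# (Lollipop and later); empty output raises CommandFailedError.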
class DeviceUtilsIsScreenOn(DeviceUtilsTest):
_L_SCREEN_ON = ['test=test mInteractive=true']
_K_SCREEN_ON = ['test=test mScreenOn=true']
_L_SCREEN_OFF = ['mInteractive=false']
_K_SCREEN_OFF = ['mScreenOn=false']
def testIsScreenOn_onPreL(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.KITKAT):
with self.assertCalls((self.call.device._RunPipedShellCommand(
'dumpsys input_method | grep mScreenOn'), self._K_SCREEN_ON)):
self.assertTrue(self.device.IsScreenOn())
def testIsScreenOn_onL(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCalls((self.call.device._RunPipedShellCommand(
'dumpsys input_method | grep mInteractive'), self._L_SCREEN_ON)):
self.assertTrue(self.device.IsScreenOn())
def testIsScreenOn_offPreL(self):
with self.patch_call(
self.call.device.build_version_sdk, return_value=version_codes.KITKAT):
with self.assertCalls((self.call.device._RunPipedShellCommand(
'dumpsys input_method | grep mScreenOn'), self._K_SCREEN_OFF)):
self.assertFalse(self.device.IsScreenOn())
def testIsScreenOn_offL(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCalls((self.call.device._RunPipedShellCommand(
'dumpsys input_method | grep mInteractive'), self._L_SCREEN_OFF)):
self.assertFalse(self.device.IsScreenOn())
def testIsScreenOn_noOutput(self):
with self.patch_call(
self.call.device.build_version_sdk,
return_value=version_codes.LOLLIPOP):
with self.assertCalls((self.call.device._RunPipedShellCommand(
'dumpsys input_method | grep mInteractive'), [])):
with self.assertRaises(device_errors.CommandFailedError):
self.device.IsScreenOn()
class DeviceUtilsSetScreen(DeviceUtilsTest):
@mock.patch('time.sleep', mock.Mock())
  def testSetScreen_alreadySet(self):
with self.assertCalls((self.call.device.IsScreenOn(), False)):
self.device.SetScreen(False)
@mock.patch('time.sleep', mock.Mock())
def testSetScreen_on(self):
with self.assertCalls(
(self.call.device.IsScreenOn(), False),
(self.call.device.SendKeyEvent(keyevent.KEYCODE_POWER), None),
(self.call.device.IsScreenOn(), True)):
self.device.SetScreen(True)
@mock.patch('time.sleep', mock.Mock())
def testSetScreen_off(self):
with self.assertCalls(
(self.call.device.IsScreenOn(), True),
(self.call.device.SendKeyEvent(keyevent.KEYCODE_POWER), None),
(self.call.device.IsScreenOn(), False)):
self.device.SetScreen(False)
@mock.patch('time.sleep', mock.Mock())
def testSetScreen_slow(self):
with self.assertCalls(
(self.call.device.IsScreenOn(), True),
(self.call.device.SendKeyEvent(keyevent.KEYCODE_POWER), None),
(self.call.device.IsScreenOn(), True),
(self.call.device.IsScreenOn(), True),
(self.call.device.IsScreenOn(), False)):
self.device.SetScreen(False)
class DeviceUtilsLoadCacheData(DeviceUtilsTest):
def testInvalidJson(self):
self.assertFalse(self.device.LoadCacheData(''))
def testTokenMissing(self):
with self.assertCalls(self.EnsureCacheInitialized()):
self.assertFalse(self.device.LoadCacheData('{}'))
def testTokenStale(self):
with self.assertCalls(self.EnsureCacheInitialized()):
self.assertFalse(self.device.LoadCacheData('{"token":"foo"}'))
def testTokenMatches(self):
with self.assertCalls(self.EnsureCacheInitialized()):
self.assertTrue(self.device.LoadCacheData('{"token":"TOKEN"}'))
def testDumpThenLoad(self):
with self.assertCalls(self.EnsureCacheInitialized()):
data = json.loads(self.device.DumpCacheData())
data['token'] = 'TOKEN'
self.assertTrue(self.device.LoadCacheData(json.dumps(data)))
class DeviceUtilsGetIMEITest(DeviceUtilsTest):
def testSuccessfulDumpsys(self):
dumpsys_output = ('Phone Subscriber Info:'
' Phone Type = GSM'
' Device ID = 123454321')
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.adb.Shell('dumpsys iphonesubinfo'), dumpsys_output)):
self.assertEqual(self.device.GetIMEI(), '123454321')
def testSuccessfulServiceCall(self):
service_output = """
Result: Parcel(\n'
0x00000000: 00000000 0000000f 00350033 00360033 '........7.6.5.4.'
0x00000010: 00360032 00370030 00300032 00300039 '3.2.1.0.1.2.3.4.'
0x00000020: 00380033 00000039 '5.6.7... ')
"""
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '24'),
(self.call.adb.Shell('service call iphonesubinfo 1'), service_output)):
self.assertEqual(self.device.GetIMEI(), '765432101234567')
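  # Note on the fixture above: the IMEI digits sit in the quoted ASCII column
  # of the parcel dump ('7.6.5.4.', '3.2.1.0.1.2.3.4.', '5.6.7...'); reading
  # those printable digits in order gives the expected '765432101234567'.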
def testNoIMEI(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '19'),
(self.call.adb.Shell('dumpsys iphonesubinfo'), 'no device id')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetIMEI()
def testAdbError(self):
with self.assertCalls(
(self.call.device.GetProp('ro.build.version.sdk', cache=True), '24'),
(self.call.adb.Shell('service call iphonesubinfo 1'),
self.ShellError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetIMEI()
class DeviceUtilsChangeOwner(DeviceUtilsTest):
def testChangeOwner(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['chown', 'user.group', '/path/to/file1', 'file2'],
check_return=True))):
self.device.ChangeOwner('user.group', ['/path/to/file1', 'file2'])
class DeviceUtilsChangeSecurityContext(DeviceUtilsTest):
def testChangeSecurityContext(self):
with self.assertCalls((self.call.device.RunShellCommand(
['chcon', 'u:object_r:system_data_file:s0', '/path', '/path2'],
as_root=device_utils._FORCE_SU,
check_return=True))):
self.device.ChangeSecurityContext('u:object_r:system_data_file:s0',
['/path', '/path2'])
class DeviceUtilsLocale(DeviceUtilsTest):
def testLocaleLegacy(self):
with self.assertCalls(
(self.call.device.GetProp('persist.sys.locale', cache=False), ''),
(self.call.device.GetProp('persist.sys.language', cache=False), 'en'),
(self.call.device.GetProp('persist.sys.country', cache=False), 'US')):
self.assertEqual(self.device.GetLocale(), ('en', 'US'))
def testLocale(self):
with self.assertCalls(
(self.call.device.GetProp('persist.sys.locale', cache=False), 'en-US'),
(self.call.device.GetProp('persist.sys.locale', cache=False),
'en-US-sw')):
self.assertEqual(self.device.GetLocale(), ('en', 'US'))
self.assertEqual(self.device.GetLocale(), ('en', 'US-sw'))
def testBadLocale(self):
with self.assertCalls((self.call.device.GetProp(
'persist.sys.locale', cache=False), 'en')):
self.assertEqual(self.device.GetLocale(), ('', ''))
def testLanguageAndCountryLegacy(self):
with self.assertCalls(
(self.call.device.GetProp('persist.sys.locale', cache=False), ''),
(self.call.device.GetProp('persist.sys.language', cache=False), 'en'),
(self.call.device.GetProp('persist.sys.country', cache=False), 'US'),
(self.call.device.GetProp('persist.sys.locale', cache=False), ''),
(self.call.device.GetProp('persist.sys.language', cache=False), 'en'),
(self.call.device.GetProp('persist.sys.country', cache=False), 'US')):
self.assertEqual(self.device.GetLanguage(), 'en')
self.assertEqual(self.device.GetCountry(), 'US')
def testLanguageAndCountry(self):
with self.assertCalls(
(self.call.device.GetProp('persist.sys.locale', cache=False), 'en-US'),
(self.call.device.GetProp('persist.sys.locale', cache=False), 'en-US')):
self.assertEqual(self.device.GetLanguage(), 'en')
self.assertEqual(self.device.GetCountry(), 'US')
class IterPushableComponentsTest(unittest.TestCase):
@classmethod
@contextlib.contextmanager
def sampleLayout(cls):
Layout = collections.namedtuple('Layout', [
'root', 'basic_file', 'symlink_file', 'symlink_dir',
'dir_with_symlinks', 'dir_without_symlinks'
])
with tempfile_ext.NamedTemporaryDirectory() as layout_root:
dir1 = os.path.join(layout_root, 'dir1')
os.makedirs(dir1)
basic_file = os.path.join(dir1, 'file1.txt')
with open(basic_file, 'w') as f:
f.write('hello world')
symlink = os.path.join(dir1, 'symlink.txt')
os.symlink(basic_file, symlink)
dir2 = os.path.join(layout_root, 'dir2')
os.makedirs(dir2)
with open(os.path.join(dir2, 'file2.txt'), 'w') as f:
f.write('goodnight moon')
symlink_dir = os.path.join(layout_root, 'dir3')
os.symlink(dir2, symlink_dir)
yield Layout(layout_root, basic_file, symlink, symlink_dir, dir1, dir2)
def safeAssertItemsEqual(self, expected, actual):
if six.PY2:
self.assertItemsEqual(expected, actual)
else:
self.assertCountEqual(expected, actual) # pylint: disable=no-member
def testFile(self):
with self.sampleLayout() as layout:
device_path = '/sdcard/basic_file'
expected = [(layout.basic_file, device_path, True)]
actual = list(
device_utils._IterPushableComponents(layout.basic_file, device_path))
self.safeAssertItemsEqual(expected, actual)
def testSymlinkFile(self):
with self.sampleLayout() as layout:
device_path = '/sdcard/basic_symlink'
expected = [(os.path.realpath(layout.symlink_file), device_path, False)]
actual = list(
device_utils._IterPushableComponents(layout.symlink_file,
device_path))
self.safeAssertItemsEqual(expected, actual)
def testDirectoryWithNoSymlink(self):
with self.sampleLayout() as layout:
device_path = '/sdcard/basic_directory'
expected = [(layout.dir_without_symlinks, device_path, True)]
actual = list(
device_utils._IterPushableComponents(layout.dir_without_symlinks,
device_path))
self.safeAssertItemsEqual(expected, actual)
def testDirectoryWithSymlink(self):
with self.sampleLayout() as layout:
device_path = '/sdcard/directory'
expected = [
(layout.basic_file,
posixpath.join(device_path, os.path.basename(layout.basic_file)),
True),
(os.path.realpath(layout.symlink_file),
posixpath.join(device_path, os.path.basename(layout.symlink_file)),
False),
]
actual = list(
device_utils._IterPushableComponents(layout.dir_with_symlinks,
device_path))
self.safeAssertItemsEqual(expected, actual)
def testSymlinkDirectory(self):
with self.sampleLayout() as layout:
device_path = '/sdcard/directory'
expected = [(os.path.realpath(layout.symlink_dir), device_path, False)]
actual = list(
device_utils._IterPushableComponents(layout.symlink_dir, device_path))
self.safeAssertItemsEqual(expected, actual)
def testDirectoryWithNestedSymlink(self):
with self.sampleLayout() as layout:
device_path = '/sdcard/directory'
expected = [
(layout.dir_without_symlinks,
posixpath.join(device_path,
os.path.basename(layout.dir_without_symlinks)), True),
(layout.basic_file,
posixpath.join(
device_path,
*os.path.split(os.path.relpath(layout.basic_file, layout.root))),
True),
(os.path.realpath(layout.symlink_file),
posixpath.join(
device_path,
*os.path.split(
os.path.relpath(layout.symlink_file, layout.root))), False),
(os.path.realpath(layout.symlink_dir),
posixpath.join(
device_path,
*os.path.split(os.path.relpath(layout.symlink_dir,
layout.root))), False),
]
actual = list(
device_utils._IterPushableComponents(layout.root, device_path))
self.safeAssertItemsEqual(expected, actual)
class DeviceUtilsGetTracingPathTest(DeviceUtilsTest):
def testGetTracingPath_hasDebugfs(self):
with self.assertCalls(
(self.call.device.RunShellCommand(['mount'], retries=0,
timeout=10, check_return=True),
['debugfs on /sys/kernel/debug', 'proc on /proc'])):
self.assertEqual('/sys/kernel/debug/tracing',
self.device.GetTracingPath())
def testGetTracingPath_noDebugfs(self):
with self.assertCalls(
(self.call.device.RunShellCommand(['mount'], retries=0,
timeout=10, check_return=True),
['proc on /proc'])):
self.assertEqual('/sys/kernel/tracing', self.device.GetTracingPath())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
catapult-project/catapult
|
devil/devil/android/device_utils_test.py
|
Python
|
bsd-3-clause
| 183,737
|
[
"MOE"
] |
e788d44a885085ccbe191ccd12bda59cc00ce3d9d090f245f92c499465950977
|
# -*- coding: utf-8 -*-
#AUTHOR: Samuel M.H. <[email protected]>
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
import django.http
from models import Redirection
@login_required
def main(request):
return(render(request, 'main.html',{
'page':'smh_redirections',
'username': request.user.username,
}))
@login_required
def help(request):
return(render(request, 'help.html'))
class RedirectionView(View):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
        return super(RedirectionView, self).dispatch(*args, **kwargs)
def get(self,request,username,alias):
try:
retval = redirect(Redirection.visit(username, alias), permanent=False)
        except Exception:
raise django.http.Http404
return(retval)
def post(self,request,username,alias):
retval = None
try:
#Get Redirection object
try:
r = Redirection.objects.get(id=Redirection._mkid(username,alias))
            except Exception:
                msg = 'No "{0}" redirection for username "{1}".'.format(alias, username)
retval = django.http.HttpResponseNotFound(msg)
raise ValueError(msg)
#Password
try:
password = request.POST['password']
            except Exception:
msg = 'Missing param "password".'
retval = django.http.HttpResponseBadRequest(msg)
raise ValueError(msg)
else:
if r.password=='':
msg = 'Empty configured password (not editable).'
retval = django.http.HttpResponseNotAllowed(msg)
raise ValueError(msg)
if r.password!=password:
msg = 'Wrong password.'
retval = django.http.HttpResponseNotAllowed(msg)
raise ValueError(msg)
#URL and update
try:
url = request.POST['url']
except:
url = 'http://'+request.META['REMOTE_ADDR']
finally:
r.url = url
r.save()
retval = django.http.HttpResponse('Redirection refreshed.')
        except Exception:
pass
return(retval)
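# A minimal client-side sketch of the refresh protocol handled by
# RedirectionView.post above; the base_url and the route shape
# (<base_url>/<username>/<alias>) are assumptions for illustration only.
# Omitting url makes the server fall back to the caller's REMOTE_ADDR.
def example_refresh_redirection(base_url, username, alias, password, url=None):
    import urllib
    import urllib2
    data = {'password': password}
    if url is not None:
        data['url'] = url
    # Passing an urlencoded data argument makes urlopen issue a POST request.
    return urllib2.urlopen('%s/%s/%s' % (base_url, username, alias),
                           urllib.urlencode(data)).read()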
|
samuelmh/django-smh_redirections
|
views.py
|
Python
|
lgpl-3.0
| 2,555
|
[
"VisIt"
] |
84b93a2d79c08138c1c26f987aa4417a57e4e988d22c73e3cde04feb2269bb20
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD), a variant
of Bayesian Ridge Regression that yields sparser coefficient estimates.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
zaxtax/scikit-learn
|
examples/linear_model/plot_ard.py
|
Python
|
bsd-3-clause
| 2,828
|
[
"Gaussian"
] |
9e724505902f79bd04a96db7eb55f103c8819f461a8326a00f4702f5dcddc39d
|
# coding: utf-8
from __future__ import unicode_literals
import io
import logging
import os
config_text = 'site_name: My Docs\n'
index_text = """# Welcome to MkDocs
For full documentation visit [mkdocs.org](http://mkdocs.org).
## Commands
* `mkdocs new [dir-name]` - Create a new project.
* `mkdocs serve` - Start the live-reloading docs server.
* `mkdocs build` - Build the documentation site.
* `mkdocs help` - Print this help message.
## Project layout
mkdocs.yml # The configuration file.
docs/
index.md # The documentation homepage.
... # Other markdown pages, images and other files.
"""
log = logging.getLogger(__name__)
def new(output_dir):
docs_dir = os.path.join(output_dir, 'docs')
config_path = os.path.join(output_dir, 'mkdocs.yml')
index_path = os.path.join(docs_dir, 'index.md')
if os.path.exists(config_path):
log.info('Project already exists.')
return
if not os.path.exists(output_dir):
log.info('Creating project directory: %s', output_dir)
os.mkdir(output_dir)
log.info('Writing config file: %s', config_path)
io.open(config_path, 'w', encoding='utf-8').write(config_text)
if os.path.exists(index_path):
return
log.info('Writing initial docs: %s', index_path)
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
io.open(index_path, 'w', encoding='utf-8').write(index_text)
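# A minimal usage sketch, assuming this module is importable as mkdocs.new:
# new() scaffolds mkdocs.yml plus docs/index.md under the given directory and
# is a no-op when the config file already exists.
#
#   import logging
#   from mkdocs.new import new
#
#   logging.basicConfig(level=logging.INFO)
#   new('my-project')  # writes my-project/mkdocs.yml and my-project/docs/index.md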
|
samhatfield/mkdocs
|
mkdocs/new.py
|
Python
|
bsd-2-clause
| 1,433
|
[
"VisIt"
] |
c20ad2d1b85cca487c945fa3c70dfd06ad6f64304289629474adab24d1ff09ca
|
import os
import sys
import version
from setuptools import setup, find_packages
_here = os.path.dirname(__file__)
f = open(os.path.join(_here, 'README.md'), 'r')
README = f.read()
f.close()
install_requires = ['lxml', 'PyICU']
if sys.version_info[0] == 2:
# python2 does not have mock in the standard lib
install_requires.append('mock')
setup(name="mp.importer",
version=version.getVersion(),
description="Utilities to ease imports of content to MetroPublisher.",
packages=find_packages(),
long_description=README,
license='BSD',
author="Vanguardistas LLC",
author_email='[email protected]',
install_requires=install_requires,
include_package_data=True,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
test_suite="mp.importer.tests",
)
|
vanguardistas/mp.importer
|
setup.py
|
Python
|
mit
| 1,151
|
[
"Brian"
] |
70634d86bcb99a0e85beec2cec3f613ab8f075387fdd2c615eba93d71fbaad73
|
#!/usr/bin/env python
import os
import sys
import sqlite3
from collections import defaultdict, namedtuple
import atexit
import json
import subprocess
import tempfile
import numpy as np
from scipy.stats import mode
import pysam
import cyvcf as vcf
from gemini.annotations import annotations_in_region, annotations_in_vcf, guess_contig_naming
from database import database_transaction
def add_requested_columns(args, update_cursor, col_names, col_types=None):
"""
Attempt to add new, user-defined columns to the
variants table. Warn if the column already exists.
"""
if args.anno_type in ["count", "boolean"]:
col_name = col_names[0]
col_type = "integer"
try:
alter_qry = "ALTER TABLE variants ADD COLUMN " \
+ col_name \
+ " " \
+ col_type \
+ " " \
+ "DEFAULT NULL"
update_cursor.execute(alter_qry)
except sqlite3.OperationalError:
sys.stderr.write("WARNING: Column \"("
+ col_name
+ ")\" already exists in variants table. Overwriting values.\n")
# reset values so that records don't retain old annotations.
update_cursor.execute("UPDATE variants SET " + col_name + " = NULL WHERE 1")
elif args.anno_type == "extract":
for col_name, col_type in zip(col_names, col_types):
try:
alter_qry = "ALTER TABLE variants ADD COLUMN " \
+ col_name \
+ " " \
+ col_type \
+ " " \
+ "DEFAULT NULL"
update_cursor.execute(alter_qry)
except sqlite3.OperationalError:
sys.stderr.write("WARNING: Column \"("
+ col_name
+ ")\" already exists in variants table. Overwriting values.\n")
else:
sys.exit("Unknown annotation type: %s\n" % args.anno_type)
def _annotate_variants(args, conn, get_val_fn, col_names=None, col_types=None, col_ops=None):
"""Generalized annotation of variants with a new column.
get_val_fn takes a list of annotations in a region and returns
the value for that region to update the database with.
Separates selection and identification of values from update,
to avoid concurrent database access errors from sqlite3, especially on
NFS systems. The retained to_update list is small, but batching
could help if memory issues emerge.
"""
# For each, use Tabix to detect overlaps with the user-defined
# annotation file. Update the variant row with T/F if overlaps found.
anno = pysam.Tabixfile(args.anno_file)
naming = guess_contig_naming(anno)
select_cursor = conn.cursor()
update_cursor = conn.cursor()
add_requested_columns(args, select_cursor, col_names, col_types)
last_id = 0
current_id = 0
total = 0
CHUNK_SIZE = 100000
to_update = []
select_cursor.execute('''SELECT chrom, start, end, ref, alt, variant_id FROM variants''')
while True:
for row in select_cursor.fetchmany(CHUNK_SIZE):
# update_data starts out as a list of the values that should
# be used to populate the new columns for the current row.
# Prefer no pysam parsing over tuple parsing to work around bug in pysam 0.8.0
# https://github.com/pysam-developers/pysam/pull/44
if args.anno_file.endswith(('.vcf', '.vcf.gz')):
update_data = get_val_fn(annotations_in_vcf(row, anno, None, naming, args.region_only, True))
else:
update_data = get_val_fn(annotations_in_region(row, anno, None, naming))
#update_data = get_val_fn(annotations_in_region(row, anno, "tuple", naming))
# were there any hits for this row?
if len(update_data) > 0:
# we add the primary key to update_data for the
# where clause in the SQL UPDATE statement.
update_data.append(str(row["variant_id"]))
to_update.append(tuple(update_data))
current_id = row["variant_id"]
if current_id <= last_id:
break
else:
update_cursor.execute("BEGIN TRANSACTION")
_update_variants(to_update, col_names, update_cursor)
update_cursor.execute("END TRANSACTION")
total += len(to_update)
print "updated", total, "variants"
last_id = current_id
to_update = []
def _update_variants(to_update, col_names, cursor):
update_qry = "UPDATE variants SET "
update_cols = ",".join(col_name + " = ?" for col_name in col_names)
update_qry += update_cols
update_qry += " WHERE variant_id = ?"
cursor.executemany(update_qry, to_update)
def annotate_variants_bool(args, conn, col_names):
"""
Populate a new, user-defined column in the variants
table with a BOOLEAN indicating whether or not
overlaps were detected between the variant and the
annotation file.
"""
def has_hit(hits):
for hit in hits:
return [1]
return [0]
return _annotate_variants(args, conn, has_hit, col_names)
def annotate_variants_count(args, conn, col_names):
"""
Populate a new, user-defined column in the variants
table with a INTEGER indicating the count of overlaps
between the variant and the
annotation file.
"""
def get_hit_count(hits):
return [len(list(hits))]
return _annotate_variants(args, conn, get_hit_count, col_names)
def _map_list_types(hit_list, col_type):
# TODO: handle missing because of VCF.
try:
if col_type == "int":
return [int(h) for h in hit_list if not h in (None, 'nan')]
elif col_type == "float":
return [float(h) for h in hit_list if not h in (None, 'nan')]
except ValueError:
sys.exit('Non-numeric value found in annotation file: %s\n' % (','.join(hit_list)))
def gemops_mean(li, col_type):
return np.average(_map_list_types(li, col_type))
def gemops_sum(li, col_type):
return np.sum(_map_list_types(li, col_type))
def gemops_list(li, col_type):
return ",".join(li)
def gemops_uniq_list(li, col_type):
return ",".join(set(li))
def gemops_median(li, col_type):
return np.median(_map_list_types(li, col_type))
def gemops_min(li, col_type):
return np.min(_map_list_types(li, col_type))
def gemops_max(li, col_type):
return np.max(_map_list_types(li, col_type))
def gemops_mode(li, col_type):
return mode(_map_list_types(li, col_type))[0][0]
def gemops_first(li, col_type):
return li[0]
def gemops_last(li, col_type):
return li[-1]
# lookup from the name to the func above.
op_funcs = dict((k[7:], v) for k, v in locals().items() if k.startswith('gemops_'))
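# The locals() scan above strips the 7-character "gemops_" prefix, so op_funcs
# maps the operation names given in args.col_operations to the reducers
# defined above. Illustrative doctest-style sketch:
#
#   >>> sorted(op_funcs)
#   ['first', 'last', 'list', 'max', 'mean', 'median', 'min', 'mode', 'sum', 'uniq_list']
#   >>> op_funcs['mean'](['1', '2', '3'], 'int')
#   2.0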
def fix_val(val, type):
if not type in ("int", "float"): return val
if isinstance(val, (int, float)): return val
if type == "int": fn = int
else: fn = float
if not val:
return None
try:
return fn(val)
except ValueError:
sys.exit('Non %s value found in annotation file: %s\n' % (type, val))
def get_hit_list(hits, col_idxs, args):
hits = list(hits)
if len(hits) == 0:
return []
hit_list = defaultdict(list)
for hit in hits:
if isinstance(hit, basestring):
hit = hit.split("\t")
if args.anno_file.endswith(('.vcf', '.vcf.gz')):
            # only makes sense to extract when there is an equal sign
info = dict((x[0], x[1]) for x in (p.split('=') for p in hit[7].split(';') if '=' in p))
for idx, col_idx in enumerate(col_idxs):
if not col_idx in info:
hit_list[idx].append('nan')
sys.stderr.write("WARNING: %s is missing from INFO field in %s for at "
"least one record.\n" % (col_idx, args.anno_file))
else:
hit_list[idx].append(info[col_idx])
                    # just append None since in a VCF they are likely to be missing?
else:
try:
for idx, col_idx in enumerate(col_idxs):
hit_list[idx].append(hit[int(col_idx) - 1])
except IndexError:
sys.exit("EXITING: Column " + args.col_extracts + " exceeds "
"the number of columns in your "
"annotation file.\n")
return hit_list
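# For a VCF annotation hit, the INFO column (hit[7]) is parsed into a dict of
# key=value pairs before extraction; flag entries without '=' are skipped.
# Illustrative record:
#
#   hit[7] == 'AF=0.25;DP=100;DB'   ->   info == {'AF': '0.25', 'DP': '100'}
#
# so requesting col_extracts 'AF,DP' appends '0.25' and '100' for this hit,
# while a key missing from INFO appends the 'nan' placeholder and triggers the
# warning above.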
def annotate_variants_extract(args, conn, col_names, col_types, col_ops, col_idxs):
"""
Populate a new, user-defined column in the variants
table based on the value(s) from a specific column.
in the annotation file.
"""
def summarize_hits(hits):
hit_list = get_hit_list(hits, col_idxs, args)
if hit_list == []: return []
vals = []
for idx, op in enumerate(col_ops):
# more than one overlap, must summarize
try:
val = op_funcs[op](hit_list[idx], col_types[idx])
except ValueError:
val = None
vals.append(fix_val(val, col_types[idx]))
return vals
return _annotate_variants(args, conn, summarize_hits,
col_names, col_types, col_ops)
def annotate(parser, args):
def _validate_args(args):
if (args.col_operations or args.col_types or args.col_extracts):
sys.exit('EXITING: You may only specify a column name (-c) when '
'using \"-a boolean\" or \"-a count\".\n')
col_names = args.col_names.split(',')
if len(col_names) > 1:
sys.exit('EXITING: You may only specify a single column name (-c) '
'when using \"-a boolean\" or \"-a count\".\n')
if not args.anno_file.endswith(('.vcf', '.vcf.gz')) and args.region_only and parser is not None:
sys.exit('EXITING: You may only specify --region-only when annotation is a VCF.')
return col_names
def _validate_extract_args(args):
if args.anno_file.endswith(('.vcf', '.vcf.gz')):
if not args.col_names:
args.col_names = args.col_extracts
elif not args.col_extracts:
args.col_extracts = args.col_names
elif args.region_only and parser is not None:
sys.exit('EXITING: You may only specify --region-only when annotation is a VCF.1')
if not args.col_types:
sys.exit('EXITING: need to give column types ("-t")\n')
col_ops = args.col_operations.split(',')
col_idxs = args.col_extracts.split(',')
col_names = args.col_names.split(',')
col_types = args.col_types.split(',')
supported_types = ['text', 'float', 'integer']
for col_type in col_types:
if col_type not in supported_types:
sys.exit('EXITING: Column type [%s] not supported.\n' %
(col_type))
supported_ops = op_funcs.keys()
for col_op in col_ops:
if col_op not in supported_ops:
sys.exit('EXITING: Column operation [%s] not supported.\n' %
(col_op))
if not (len(col_ops) == len(col_names) ==
len(col_types) == len(col_idxs)):
sys.exit('EXITING: The number of column names, numbers, types, and '
'operations must match: [%s], [%s], [%s], [%s]\n' %
(args.col_names, args.col_extracts, args.col_types, args.col_operations))
return col_names, col_types, col_ops, col_idxs
if (args.db is None):
parser.print_help()
exit(1)
if not os.path.exists(args.db):
sys.stderr.write("Error: cannot find database file.")
exit(1)
if not os.path.exists(args.anno_file):
sys.stderr.write("Error: cannot find annotation file.")
exit(1)
conn = sqlite3.connect(args.db)
conn.row_factory = sqlite3.Row # allow us to refer to columns by name
conn.isolation_level = None
if args.anno_type == "boolean":
col_names = _validate_args(args)
annotate_variants_bool(args, conn, col_names)
elif args.anno_type == "count":
col_names = _validate_args(args)
annotate_variants_count(args, conn, col_names)
elif args.anno_type == "extract":
if args.col_extracts is None and not args.anno_file.endswith('.vcf.gz'):
sys.exit("You must specify which column to "
"extract from your annotation file.")
else:
col_names, col_types, col_ops, col_idxs = _validate_extract_args(args)
annotate_variants_extract(args, conn, col_names, col_types, col_ops, col_idxs)
else:
sys.exit("Unknown column type requested. Exiting.")
conn.close()
# index on the newly created columns
for col_name in col_names:
with database_transaction(args.db) as c:
c.execute('''drop index if exists %s''' % (col_name + "idx"))
c.execute('''create index %s on variants(%s)''' % (col_name + "idx", col_name))
# ## Automate addition of extra fields to database
def add_extras(gemini_db, chunk_dbs, region_only):
"""Annotate gemini database with extra columns from processed chunks, if available.
"""
for chunk in chunk_dbs:
extra_file = get_extra_vcf(chunk)
if extra_file is False:
            # there was no extra annotation, so we just continue
continue
        # these are the field names that we'll pull from the INFO field.
fields = [x.strip() for x in open(extra_file[:-3] + ".fields")]
ops = ["first" for t in fields]
Args = namedtuple("Args", "db,anno_file,anno_type,col_operations,col_names,col_types,col_extracts,region_only")
# TODO: hard-coded "text" into the type...
args = Args(gemini_db, extra_file, "extract", ",".join(ops),
",".join(fields), ",".join(["text"] * len(fields)),
",".join(fields),
region_only)
annotate(None, args)
os.unlink(extra_file[:-3] + ".fields")
def rm(path):
try:
os.unlink(path)
    except OSError:
pass
def get_extra_vcf(gemini_db, tmpl=None):
"""Retrieve extra file associated with a gemini database.
Most commonly, this will be with VEP annotations added.
Returns false if there are no vcfs associated with the database.
"""
base = os.path.basename(gemini_db)
path = os.path.join(tempfile.gettempdir(), "extra.%s.vcf" % base)
mode = "r" if tmpl is None else "w"
if mode == "r":
if not os.path.exists(path):
return False
if not path.endswith(".gz"):
subprocess.check_call(["bgzip", "-f", path])
bgzip_out = path + ".gz"
subprocess.check_call(["tabix", "-p", "vcf", "-f", bgzip_out])
return bgzip_out
return path
fh = open(path, "w")
if mode == "w":
atexit.register(rm, fh.name)
atexit.register(rm, fh.name + ".gz")
atexit.register(rm, fh.name + ".gz.tbi")
return vcf.Writer(fh, tmpl)
return vcf.Reader(fh)
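# Usage sketch: calling get_extra_vcf(db, tmpl=<template VCF reader>) returns a
# vcf.Writer over a temporary "extra.<db>.vcf" (and registers cleanup handlers),
# while a later get_extra_vcf(db) call returns the bgzipped, tabix-indexed path
# to that file, or False when none was written; that is how add_extras above
# consumes it.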
|
heuermh/gemini
|
gemini/gemini_annotate.py
|
Python
|
mit
| 15,422
|
[
"pysam"
] |
5f21c5861601396e0629a034fda9521544b30a638f670cca61ca659892d6c518
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.documentai_v1beta3.services.document_processor_service import (
DocumentProcessorServiceAsyncClient,
)
from google.cloud.documentai_v1beta3.services.document_processor_service import (
DocumentProcessorServiceClient,
)
from google.cloud.documentai_v1beta3.services.document_processor_service import pagers
from google.cloud.documentai_v1beta3.services.document_processor_service import (
transports,
)
from google.cloud.documentai_v1beta3.types import document
from google.cloud.documentai_v1beta3.types import document_io
from google.cloud.documentai_v1beta3.types import document_processor_service
from google.cloud.documentai_v1beta3.types import geometry
from google.cloud.documentai_v1beta3.types import processor
from google.cloud.documentai_v1beta3.types import processor as gcd_processor
from google.cloud.documentai_v1beta3.types import processor_type
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.type import color_pb2 # type: ignore
from google.type import date_pb2 # type: ignore
from google.type import datetime_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
from google.type import postal_address_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DocumentProcessorServiceClient._get_default_mtls_endpoint(None) is None
assert (
DocumentProcessorServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
DocumentProcessorServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DocumentProcessorServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DocumentProcessorServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
DocumentProcessorServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[DocumentProcessorServiceClient, DocumentProcessorServiceAsyncClient,],
)
def test_document_processor_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "documentai.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.DocumentProcessorServiceGrpcTransport, "grpc"),
(transports.DocumentProcessorServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_document_processor_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[DocumentProcessorServiceClient, DocumentProcessorServiceAsyncClient,],
)
def test_document_processor_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "documentai.googleapis.com:443"
def test_document_processor_service_client_get_transport_class():
transport = DocumentProcessorServiceClient.get_transport_class()
available_transports = [
transports.DocumentProcessorServiceGrpcTransport,
]
assert transport in available_transports
transport = DocumentProcessorServiceClient.get_transport_class("grpc")
assert transport == transports.DocumentProcessorServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
"grpc",
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DocumentProcessorServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentProcessorServiceClient),
)
@mock.patch.object(
DocumentProcessorServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentProcessorServiceAsyncClient),
)
def test_document_processor_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
DocumentProcessorServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
DocumentProcessorServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
"grpc",
"true",
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
"grpc",
"false",
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DocumentProcessorServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentProcessorServiceClient),
)
@mock.patch.object(
DocumentProcessorServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentProcessorServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_document_processor_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[DocumentProcessorServiceClient, DocumentProcessorServiceAsyncClient],
)
@mock.patch.object(
DocumentProcessorServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentProcessorServiceClient),
)
@mock.patch.object(
DocumentProcessorServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentProcessorServiceAsyncClient),
)
def test_document_processor_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
"grpc",
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_document_processor_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_document_processor_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_document_processor_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.documentai_v1beta3.services.document_processor_service.transports.DocumentProcessorServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DocumentProcessorServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_document_processor_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"documentai.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="documentai.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [document_processor_service.ProcessRequest, dict,]
)
def test_process_document(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document_processor_service.ProcessResponse(
human_review_operation="human_review_operation_value",
)
response = client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ProcessRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document_processor_service.ProcessResponse)
assert response.human_review_operation == "human_review_operation_value"
def test_process_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
client.process_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ProcessRequest()
@pytest.mark.asyncio
async def test_process_document_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.ProcessRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.ProcessResponse(
human_review_operation="human_review_operation_value",
)
)
response = await client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ProcessRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document_processor_service.ProcessResponse)
assert response.human_review_operation == "human_review_operation_value"
@pytest.mark.asyncio
async def test_process_document_async_from_dict():
await test_process_document_async(request_type=dict)
def test_process_document_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.ProcessRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
call.return_value = document_processor_service.ProcessResponse()
client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_process_document_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.ProcessRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.ProcessResponse()
)
await client.process_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_process_document_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document_processor_service.ProcessResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.process_document(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_process_document_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.process_document(
document_processor_service.ProcessRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_process_document_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.process_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.ProcessResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.process_document(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_process_document_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.process_document(
document_processor_service.ProcessRequest(), name="name_value",
)
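# Illustrative sketch (not a generated test): the flattened-argument tests above
# exercise the client's two mutually exclusive calling conventions. This helper
# is hypothetical, is not collected by pytest, and assumes a configured client
# with network access, so it is only meant to be read alongside the tests.
def _example_calling_conventions(client, processor_name):
    # Option 1: pass a fully populated request object.
    request = document_processor_service.ProcessRequest(name=processor_name)
    response_from_request = client.process_document(request=request)
    # Option 2: pass flattened keyword arguments and let the client build the
    # request. Mixing a request object with flattened fields raises ValueError,
    # which is exactly what the *_flattened_error tests assert.
    response_from_kwargs = client.process_document(name=processor_name)
    return response_from_request, response_from_kwargs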
@pytest.mark.parametrize(
"request_type", [document_processor_service.BatchProcessRequest, dict,]
)
def test_batch_process_documents(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.BatchProcessRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_process_documents_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
client.batch_process_documents()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.BatchProcessRequest()
@pytest.mark.asyncio
async def test_batch_process_documents_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.BatchProcessRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.BatchProcessRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_process_documents_async_from_dict():
await test_batch_process_documents_async(request_type=dict)
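# Illustrative sketch (not a generated test): outside of these mocked tests,
# batch_process_documents returns a long-running operation future (hence the
# future.Future assertions above). This hypothetical helper is not collected
# by pytest and assumes a real client, so it only documents the typical
# polling pattern.
def _example_wait_for_batch_operation(client, request):
    operation = client.batch_process_documents(request=request)
    # Block until the service finishes (or raise on error/timeout); the
    # timeout value here is an arbitrary example.
    result = operation.result(timeout=600)
    return result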
def test_batch_process_documents_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.BatchProcessRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_process_documents_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.BatchProcessRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_batch_process_documents_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.batch_process_documents(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_batch_process_documents_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.batch_process_documents(
document_processor_service.BatchProcessRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.batch_process_documents(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.batch_process_documents(
document_processor_service.BatchProcessRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [document_processor_service.FetchProcessorTypesRequest, dict,]
)
def test_fetch_processor_types(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = document_processor_service.FetchProcessorTypesResponse()
response = client.fetch_processor_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.FetchProcessorTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document_processor_service.FetchProcessorTypesResponse)
def test_fetch_processor_types_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
client.fetch_processor_types()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.FetchProcessorTypesRequest()
@pytest.mark.asyncio
async def test_fetch_processor_types_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.FetchProcessorTypesRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.FetchProcessorTypesResponse()
)
response = await client.fetch_processor_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.FetchProcessorTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document_processor_service.FetchProcessorTypesResponse)
@pytest.mark.asyncio
async def test_fetch_processor_types_async_from_dict():
await test_fetch_processor_types_async(request_type=dict)
def test_fetch_processor_types_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.FetchProcessorTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
call.return_value = document_processor_service.FetchProcessorTypesResponse()
client.fetch_processor_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_fetch_processor_types_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.FetchProcessorTypesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.FetchProcessorTypesResponse()
)
await client.fetch_processor_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_fetch_processor_types_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = document_processor_service.FetchProcessorTypesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.fetch_processor_types(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_fetch_processor_types_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.fetch_processor_types(
document_processor_service.FetchProcessorTypesRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_fetch_processor_types_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_processor_types), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.FetchProcessorTypesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.fetch_processor_types(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_fetch_processor_types_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.fetch_processor_types(
document_processor_service.FetchProcessorTypesRequest(),
parent="parent_value",
)
@pytest.mark.parametrize(
"request_type", [document_processor_service.ListProcessorsRequest, dict,]
)
def test_list_processors(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document_processor_service.ListProcessorsResponse(
next_page_token="next_page_token_value",
)
response = client.list_processors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ListProcessorsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListProcessorsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_processors_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
client.list_processors()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ListProcessorsRequest()
@pytest.mark.asyncio
async def test_list_processors_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.ListProcessorsRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.ListProcessorsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_processors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ListProcessorsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListProcessorsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_processors_async_from_dict():
await test_list_processors_async(request_type=dict)
def test_list_processors_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.ListProcessorsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
call.return_value = document_processor_service.ListProcessorsResponse()
client.list_processors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_processors_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.ListProcessorsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.ListProcessorsResponse()
)
await client.list_processors(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_processors_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document_processor_service.ListProcessorsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_processors(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_processors_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_processors(
document_processor_service.ListProcessorsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_processors_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document_processor_service.ListProcessorsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_processors(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_processors_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_processors(
document_processor_service.ListProcessorsRequest(), parent="parent_value",
)
def test_list_processors_pager(transport_name: str = "grpc"):
client = DocumentProcessorServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
document_processor_service.ListProcessorsResponse(
processors=[
processor.Processor(),
processor.Processor(),
processor.Processor(),
],
next_page_token="abc",
),
document_processor_service.ListProcessorsResponse(
processors=[], next_page_token="def",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(),], next_page_token="ghi",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(), processor.Processor(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_processors(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, processor.Processor) for i in results)
def test_list_processors_pages(transport_name: str = "grpc"):
client = DocumentProcessorServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_processors), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
document_processor_service.ListProcessorsResponse(
processors=[
processor.Processor(),
processor.Processor(),
processor.Processor(),
],
next_page_token="abc",
),
document_processor_service.ListProcessorsResponse(
processors=[], next_page_token="def",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(),], next_page_token="ghi",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(), processor.Processor(),],
),
RuntimeError,
)
pages = list(client.list_processors(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_processors_async_pager():
client = DocumentProcessorServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_processors), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
document_processor_service.ListProcessorsResponse(
processors=[
processor.Processor(),
processor.Processor(),
processor.Processor(),
],
next_page_token="abc",
),
document_processor_service.ListProcessorsResponse(
processors=[], next_page_token="def",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(),], next_page_token="ghi",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(), processor.Processor(),],
),
RuntimeError,
)
async_pager = await client.list_processors(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, processor.Processor) for i in responses)
@pytest.mark.asyncio
async def test_list_processors_async_pages():
client = DocumentProcessorServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_processors), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
document_processor_service.ListProcessorsResponse(
processors=[
processor.Processor(),
processor.Processor(),
processor.Processor(),
],
next_page_token="abc",
),
document_processor_service.ListProcessorsResponse(
processors=[], next_page_token="def",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(),], next_page_token="ghi",
),
document_processor_service.ListProcessorsResponse(
processors=[processor.Processor(), processor.Processor(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_processors(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
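# Illustrative sketch (not a generated test): the pager objects returned by
# list_processors fetch additional pages lazily as they are iterated, which is
# what the pager/pages tests above simulate with side_effect responses. This
# hypothetical helper is not collected by pytest and assumes a real client.
def _example_iterate_processors(client, parent):
    # Iterating the pager transparently follows next_page_token across pages.
    names = [proc.name for proc in client.list_processors(parent=parent)]
    # Alternatively, iterate page by page via pager.pages when per-page access
    # (e.g. raw_page.next_page_token) is needed.
    return names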
@pytest.mark.parametrize(
"request_type", [document_processor_service.CreateProcessorRequest, dict,]
)
def test_create_processor(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_processor.Processor(
name="name_value",
type_="type__value",
display_name="display_name_value",
state=gcd_processor.Processor.State.ENABLED,
default_processor_version="default_processor_version_value",
process_endpoint="process_endpoint_value",
kms_key_name="kms_key_name_value",
)
response = client.create_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.CreateProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_processor.Processor)
assert response.name == "name_value"
assert response.type_ == "type__value"
assert response.display_name == "display_name_value"
assert response.state == gcd_processor.Processor.State.ENABLED
assert response.default_processor_version == "default_processor_version_value"
assert response.process_endpoint == "process_endpoint_value"
assert response.kms_key_name == "kms_key_name_value"
def test_create_processor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
client.create_processor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.CreateProcessorRequest()
@pytest.mark.asyncio
async def test_create_processor_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.CreateProcessorRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_processor.Processor(
name="name_value",
type_="type__value",
display_name="display_name_value",
state=gcd_processor.Processor.State.ENABLED,
default_processor_version="default_processor_version_value",
process_endpoint="process_endpoint_value",
kms_key_name="kms_key_name_value",
)
)
response = await client.create_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.CreateProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_processor.Processor)
assert response.name == "name_value"
assert response.type_ == "type__value"
assert response.display_name == "display_name_value"
assert response.state == gcd_processor.Processor.State.ENABLED
assert response.default_processor_version == "default_processor_version_value"
assert response.process_endpoint == "process_endpoint_value"
assert response.kms_key_name == "kms_key_name_value"
@pytest.mark.asyncio
async def test_create_processor_async_from_dict():
await test_create_processor_async(request_type=dict)
def test_create_processor_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.CreateProcessorRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
call.return_value = gcd_processor.Processor()
client.create_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_processor_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.CreateProcessorRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_processor.Processor()
)
await client.create_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_processor_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_processor.Processor()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_processor(
parent="parent_value", processor=gcd_processor.Processor(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].processor
mock_val = gcd_processor.Processor(name="name_value")
assert arg == mock_val
def test_create_processor_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_processor(
document_processor_service.CreateProcessorRequest(),
parent="parent_value",
processor=gcd_processor.Processor(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_processor_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_processor.Processor()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_processor(
parent="parent_value", processor=gcd_processor.Processor(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].processor
mock_val = gcd_processor.Processor(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_processor_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_processor(
document_processor_service.CreateProcessorRequest(),
parent="parent_value",
processor=gcd_processor.Processor(name="name_value"),
)
@pytest.mark.parametrize(
"request_type", [document_processor_service.DeleteProcessorRequest, dict,]
)
def test_delete_processor(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.DeleteProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_processor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
client.delete_processor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.DeleteProcessorRequest()
@pytest.mark.asyncio
async def test_delete_processor_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.DeleteProcessorRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.DeleteProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_processor_async_from_dict():
await test_delete_processor_async(request_type=dict)
def test_delete_processor_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.DeleteProcessorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_processor_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.DeleteProcessorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_processor_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_processor(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_processor_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_processor(
document_processor_service.DeleteProcessorRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_processor_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_processor(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_processor_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_processor(
document_processor_service.DeleteProcessorRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [document_processor_service.EnableProcessorRequest, dict,]
)
def test_enable_processor(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.enable_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.enable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.EnableProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_enable_processor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.enable_processor), "__call__") as call:
client.enable_processor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.EnableProcessorRequest()
@pytest.mark.asyncio
async def test_enable_processor_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.EnableProcessorRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.enable_processor), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.enable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.EnableProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_enable_processor_async_from_dict():
await test_enable_processor_async(request_type=dict)
def test_enable_processor_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.EnableProcessorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.enable_processor), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.enable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_enable_processor_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.EnableProcessorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.enable_processor), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.enable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [document_processor_service.DisableProcessorRequest, dict,]
)
def test_disable_processor(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_processor), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.disable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.DisableProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_disable_processor_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_processor), "__call__"
) as call:
client.disable_processor()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.DisableProcessorRequest()
@pytest.mark.asyncio
async def test_disable_processor_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.DisableProcessorRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_processor), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.disable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.DisableProcessorRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_disable_processor_async_from_dict():
await test_disable_processor_async(request_type=dict)
def test_disable_processor_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.DisableProcessorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_processor), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.disable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_disable_processor_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.DisableProcessorRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.disable_processor), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.disable_processor(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [document_processor_service.ReviewDocumentRequest, dict,]
)
def test_review_document(request_type, transport: str = "grpc"):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.review_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ReviewDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_review_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
client.review_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ReviewDocumentRequest()
@pytest.mark.asyncio
async def test_review_document_async(
transport: str = "grpc_asyncio",
request_type=document_processor_service.ReviewDocumentRequest,
):
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.review_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document_processor_service.ReviewDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_review_document_async_from_dict():
await test_review_document_async(request_type=dict)
def test_review_document_field_headers():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.ReviewDocumentRequest()
request.human_review_config = "human_review_config/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.review_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"human_review_config=human_review_config/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_review_document_field_headers_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document_processor_service.ReviewDocumentRequest()
request.human_review_config = "human_review_config/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.review_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"human_review_config=human_review_config/value",
) in kw["metadata"]
def test_review_document_flattened():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.review_document(human_review_config="human_review_config_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].human_review_config
mock_val = "human_review_config_value"
assert arg == mock_val
def test_review_document_flattened_error():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.review_document(
document_processor_service.ReviewDocumentRequest(),
human_review_config="human_review_config_value",
)
@pytest.mark.asyncio
async def test_review_document_flattened_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.review_document), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.review_document(
human_review_config="human_review_config_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].human_review_config
mock_val = "human_review_config_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_review_document_flattened_error_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.review_document(
document_processor_service.ReviewDocumentRequest(),
human_review_config="human_review_config_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DocumentProcessorServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DocumentProcessorServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentProcessorServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.DocumentProcessorServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DocumentProcessorServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DocumentProcessorServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.DocumentProcessorServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentProcessorServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DocumentProcessorServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DocumentProcessorServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DocumentProcessorServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DocumentProcessorServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentProcessorServiceGrpcTransport,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.DocumentProcessorServiceGrpcTransport,
)
def test_document_processor_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DocumentProcessorServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_document_processor_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.documentai_v1beta3.services.document_processor_service.transports.DocumentProcessorServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DocumentProcessorServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"process_document",
"batch_process_documents",
"fetch_processor_types",
"list_processors",
"create_processor",
"delete_processor",
"enable_processor",
"disable_processor",
"review_document",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_document_processor_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.documentai_v1beta3.services.document_processor_service.transports.DocumentProcessorServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DocumentProcessorServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_document_processor_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.documentai_v1beta3.services.document_processor_service.transports.DocumentProcessorServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DocumentProcessorServiceTransport()
adc.assert_called_once()
def test_document_processor_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DocumentProcessorServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentProcessorServiceGrpcTransport,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
],
)
def test_document_processor_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DocumentProcessorServiceGrpcTransport, grpc_helpers),
(transports.DocumentProcessorServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_document_processor_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"documentai.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="documentai.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentProcessorServiceGrpcTransport,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
],
)
def test_document_processor_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_document_processor_service_host_no_port():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="documentai.googleapis.com"
),
)
assert client.transport._host == "documentai.googleapis.com:443"
def test_document_processor_service_host_with_port():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="documentai.googleapis.com:8000"
),
)
assert client.transport._host == "documentai.googleapis.com:8000"
def test_document_processor_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DocumentProcessorServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_document_processor_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DocumentProcessorServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentProcessorServiceGrpcTransport,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
],
)
def test_document_processor_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DocumentProcessorServiceGrpcTransport,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
],
)
def test_document_processor_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_document_processor_service_grpc_lro_client():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_document_processor_service_grpc_lro_async_client():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_human_review_config_path():
project = "squid"
location = "clam"
processor = "whelk"
expected = "projects/{project}/locations/{location}/processors/{processor}/humanReviewConfig".format(
project=project, location=location, processor=processor,
)
actual = DocumentProcessorServiceClient.human_review_config_path(
project, location, processor
)
assert expected == actual
def test_parse_human_review_config_path():
expected = {
"project": "octopus",
"location": "oyster",
"processor": "nudibranch",
}
path = DocumentProcessorServiceClient.human_review_config_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_human_review_config_path(path)
assert expected == actual
def test_processor_path():
project = "cuttlefish"
location = "mussel"
processor = "winkle"
expected = "projects/{project}/locations/{location}/processors/{processor}".format(
project=project, location=location, processor=processor,
)
actual = DocumentProcessorServiceClient.processor_path(project, location, processor)
assert expected == actual
def test_parse_processor_path():
expected = {
"project": "nautilus",
"location": "scallop",
"processor": "abalone",
}
path = DocumentProcessorServiceClient.processor_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_processor_path(path)
assert expected == actual
def test_processor_type_path():
project = "squid"
location = "clam"
processor_type = "whelk"
expected = "projects/{project}/locations/{location}/processorTypes/{processor_type}".format(
project=project, location=location, processor_type=processor_type,
)
actual = DocumentProcessorServiceClient.processor_type_path(
project, location, processor_type
)
assert expected == actual
def test_parse_processor_type_path():
expected = {
"project": "octopus",
"location": "oyster",
"processor_type": "nudibranch",
}
path = DocumentProcessorServiceClient.processor_type_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_processor_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DocumentProcessorServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = DocumentProcessorServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = DocumentProcessorServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = DocumentProcessorServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = DocumentProcessorServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = DocumentProcessorServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = DocumentProcessorServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = DocumentProcessorServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DocumentProcessorServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = DocumentProcessorServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DocumentProcessorServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DocumentProcessorServiceTransport, "_prep_wrapped_messages"
) as prep:
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DocumentProcessorServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DocumentProcessorServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = DocumentProcessorServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = DocumentProcessorServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
DocumentProcessorServiceClient,
transports.DocumentProcessorServiceGrpcTransport,
),
(
DocumentProcessorServiceAsyncClient,
transports.DocumentProcessorServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-documentai | tests/unit/gapic/documentai_v1beta3/test_document_processor_service.py | Python | apache-2.0 | 131,323 | ["Octopus"] | f011262fd20e17c5cdb109855daf3ca7fa277dd18b46805723d4c1f0965129e6 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on May 1, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "May 1, 2012"
import unittest2 as unittest
import os
import json
from io import open
try:
import matplotlib
matplotlib.use("pdf") # Use non-graphical display backend during test.
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.plotter import DosPlotter, BSPlotter, plot_ellipsoid, fold_point, plot_brillouin_zone
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.core.structure import Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
import scipy
class DosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "complete_dos.json"), "r",
encoding='utf-8') as f:
self.dos = CompleteDos.from_dict(json.load(f))
self.plotter = DosPlotter(sigma=0.2, stack=True)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 4)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(),
key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Li", "Fe", "P", "O"]:
self.assertIn(el, d)
class BSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "CaO_2605_bandstructure.json"),
"r", encoding='utf-8') as f:
d = json.loads(f.read())
self.bs = BandStructureSymmLine.from_dict(d)
self.plotter = BSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(len(self.plotter.bs_plot_data()['distances'][0]), 16,
"wrong number of distances in the first branch")
self.assertEqual(len(self.plotter.bs_plot_data()['distances']), 10,
"wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()['distances']]),
160, "wrong number of distances")
self.assertEqual(self.plotter.bs_plot_data()['ticks']['label'][5], "K",
"wrong tick label")
self.assertEqual(len(self.plotter.bs_plot_data()['ticks']['label']),
19, "wrong number of tick labels")
class PlotBZTest(unittest.TestCase):
def setUp(self):
if not have_matplotlib:
raise unittest.SkipTest("matplotlib not available")
self.rec_latt = Structure.from_file(os.path.join(test_dir, "Si.cssr")).lattice.reciprocal_lattice
self.kpath = [[[0., 0., 0.], [0.5, 0., 0.5], [0.5, 0.25, 0.75], [0.375, 0.375, 0.75]]]
self.labels = {'\\Gamma': [0., 0., 0.], 'K': [0.375, 0.375, 0.75], u'L': [0.5, 0.5, 0.5],
'U': [0.625, 0.25, 0.625], 'W': [0.5, 0.25, 0.75], 'X': [0.5, 0., 0.5]}
self.hessian = [[17.64757034, 3.90159625, -4.77845607],
[3.90159625, 14.88874142, 6.75776076],
[-4.77845607, 6.75776076, 12.12987493]]
self.center = [0.41, 0., 0.41]
self.points = [[0., 0., 0.], [0.5, 0.5, 0.5]]
def test_bz_plot(self):
fig, ax = plot_ellipsoid(self.hessian, self.center, lattice=self.rec_latt)
plot_brillouin_zone(self.rec_latt, lines=self.kpath, labels=self.labels, kpoints=self.points, ax=ax, show=False)
def test_fold_point(self):
self.assertTrue(scipy.allclose(fold_point([0., -0.5, 0.5], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0., 0.5, 0.5])))
self.assertTrue(scipy.allclose(fold_point([0.1, -0.6, 0.2], lattice=self.rec_latt),
self.rec_latt.get_cartesian_coords([0.1, 0.4, 0.2])))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| aykol/pymatgen | pymatgen/electronic_structure/tests/test_plotter.py | Python | mit | 4,584 | ["pymatgen"] | f2391d5d81dc0e99e08c32acd85db2125eae9b529b2a404dc3bdbafa67a85c31 |
""" Affine image registration module consisting of the following classes:
AffineMap: encapsulates the necessary information to perform affine
transforms between two domains, defined by a `static` and a `moving`
image. The `domain` of the transform is the set of points in the
`static` image's grid, and the `codomain` is the set of points in
the `moving` image. When we call the `transform` method, `AffineMap`
maps each point `x` of the domain (`static` grid) to the codomain
(`moving` grid) and interpolates the `moving` image at that point
to obtain the intensity value to be placed at `x` in the resulting
grid. The `transform_inverse` method performs the opposite operation
mapping points in the codomain to points in the domain.
ParzenJointHistogram: computes the marginal and joint distributions of
intensities of a pair of images, using Parzen windows [Parzen62]
with a cubic spline kernel, as proposed by Mattes et al. [Mattes03].
It also computes the gradient of the joint histogram w.r.t. the
parameters of a given transform.
MutualInformationMetric: computes the value and gradient of the mutual
information metric the way `Optimizer` needs them. That is, given
a set of transform parameters, it will use `ParzenJointHistogram`
to compute the value and gradient of the joint intensity histogram
        evaluated at the given parameters, and evaluate the value and
gradient of the histogram's mutual information.
AffineRegistration: it runs the multi-resolution registration, putting
all the pieces together. It needs to create the scale space of the
images and run the multi-resolution registration by using the Metric
and the Optimizer at each level of the Gaussian pyramid. At each
level, it will setup the metric to compute value and gradient of the
metric with the input images with different levels of smoothing.
References
----------
[Parzen62] E. Parzen. On the estimation of a probability density
function and the mode. Annals of Mathematical Statistics,
33(3), 1065-1076, 1962.
[Mattes03] Mattes, D., Haynor, D. R., Vesselle, H., Lewellen, T. K.,
& Eubank, W. PET-CT image registration in the chest using
free-form deformations. IEEE Transactions on Medical
Imaging, 22(1), 120-8, 2003.
"""
import numpy as np
import numpy.linalg as npl
import scipy.ndimage as ndimage
from ..core.optimize import Optimizer
from ..core.optimize import SCIPY_LESS_0_12
from . import vector_fields as vf
from . import VerbosityLevels
from .parzenhist import (ParzenJointHistogram,
sample_domain_regular,
compute_parzen_mi)
from .imwarp import (get_direction_and_spacings, ScaleSpace)
from .scalespace import IsotropicScaleSpace
from warnings import warn
_interp_options = ['nearest', 'linear']
_transform_method = {}
_transform_method[(2, 'nearest')] = vf.transform_2d_affine_nn
_transform_method[(3, 'nearest')] = vf.transform_3d_affine_nn
_transform_method[(2, 'linear')] = vf.transform_2d_affine
_transform_method[(3, 'linear')] = vf.transform_3d_affine
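# `_transform_method` above is a small dispatch table: the resampling routine
# is selected by a (dimension, interpolation) key, exactly as done inside
# `AffineMap._apply_transform` below.  The helper that follows is a minimal,
# illustrative sketch of that lookup and is not part of the original module;
# the toy image and the identity matrix are arbitrary assumptions.
def _example_transform_dispatch():
    # Linear interpolation expects a float64 image; the shape is passed as
    # an int32 array, mirroring `_apply_transform`.
    image = np.zeros((4, 4), dtype=np.float64)
    image[1:3, 1:3] = 1.0
    shape = np.array(image.shape, dtype=np.int32)
    identity = np.eye(3)  # homogeneous 2D affine: output equals input
    resample = _transform_method[(2, 'linear')]
    return resample(image, shape, identity)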
class AffineInversionError(Exception):
pass
class AffineMap(object):
def __init__(self, affine, domain_grid_shape=None, domain_grid2world=None,
codomain_grid_shape=None, codomain_grid2world=None):
""" AffineMap
Implements an affine transformation whose domain is given by
`domain_grid` and `domain_grid2world`, and whose co-domain is
given by `codomain_grid` and `codomain_grid2world`.
The actual transform is represented by the `affine` matrix, which
        operates in world coordinates. Therefore, to transform a moving image
towards a static image, we first map each voxel (i,j,k) of the static
image to world coordinates (x,y,z) by applying `domain_grid2world`.
Then we apply the `affine` transform to (x,y,z) obtaining (x', y', z')
in moving image's world coordinates. Finally, (x', y', z') is mapped
to voxel coordinates (i', j', k') in the moving image by multiplying
(x', y', z') by the inverse of `codomain_grid2world`. The
`codomain_grid_shape` is used analogously to transform the static
image towards the moving image when calling `transform_inverse`.
If the domain/co-domain information is not provided (None) then the
sampling information needs to be specified each time the `transform`
or `transform_inverse` is called to transform images. Note that such
sampling information is not necessary to transform points defined in
physical space, such as stream lines.
Parameters
----------
affine : array, shape (dim + 1, dim + 1)
the matrix defining the affine transform, where `dim` is the
dimension of the space this map operates in (2 for 2D images,
3 for 3D images). If None, then `self` represents the identity
transformation.
domain_grid_shape : sequence, shape (dim,), optional
the shape of the default domain sampling grid. When `transform`
is called to transform an image, the resulting image will have
this shape, unless a different sampling information is provided.
If None, then the sampling grid shape must be specified each time
the `transform` method is called.
domain_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the domain grid.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
        codomain_grid_shape : sequence of integers, shape (dim,), optional
the shape of the default co-domain sampling grid. When
`transform_inverse` is called to transform an image, the resulting
image will have this shape, unless a different sampling
information is provided. If None (the default), then the sampling
grid shape must be specified each time the `transform_inverse`
method is called.
        codomain_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the co-domain grid.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
"""
self.set_affine(affine)
self.domain_shape = domain_grid_shape
self.domain_grid2world = domain_grid2world
self.codomain_shape = codomain_grid_shape
self.codomain_grid2world = codomain_grid2world
def set_affine(self, affine):
""" Sets the affine transform (operating in physical space)
Also sets `self.affine_inv` - the inverse of `affine`, or None if
there is no inverse.
Parameters
----------
affine : array, shape (dim + 1, dim + 1)
the matrix representing the affine transform operating in
physical space. The domain and co-domain information
remains unchanged. If None, then `self` represents the identity
transformation.
"""
self.affine = affine
self.affine_inv = None
if self.affine is None:
return
if not np.all(np.isfinite(affine)):
raise AffineInversionError('Affine contains invalid elements')
try:
self.affine_inv = npl.inv(affine)
except npl.LinAlgError:
raise AffineInversionError('Affine cannot be inverted')
def _apply_transform(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False, apply_inverse=False):
""" Transforms the input image applying this affine transform
This is a generic function to transform images using either this
(direct) transform or its inverse.
If applying the direct transform (`apply_inverse=False`):
by default, the transformed image is sampled at a grid defined by
`self.domain_shape` and `self.domain_grid2world`.
If applying the inverse transform (`apply_inverse=True`):
by default, the transformed image is sampled at a grid defined by
`self.codomain_shape` and `self.codomain_grid2world`.
If the sampling information was not provided at initialization of this
transform then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
If None (the default), then `self.domain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
`self.domain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
apply_inverse : Boolean, optional
If False (the default) the image is transformed from the codomain
of this transform to its domain using the (direct) affine
transform. Otherwise, the image is transformed from the domain
of this transform to its codomain using the (inverse) affine
transform.
Returns
-------
transformed : array, shape `sampling_grid_shape` or `self.domain_shape`
the transformed image, sampled at the requested grid
"""
# Verify valid interpolation requested
if interp not in _interp_options:
raise ValueError('Unknown interpolation method: %s' % (interp,))
# Obtain sampling grid
if sampling_grid_shape is None:
if apply_inverse:
sampling_grid_shape = self.codomain_shape
else:
sampling_grid_shape = self.domain_shape
if sampling_grid_shape is None:
msg = 'Unknown sampling info. Provide a valid sampling_grid_shape'
raise ValueError(msg)
dim = len(sampling_grid_shape)
shape = np.array(sampling_grid_shape, dtype=np.int32)
# Verify valid image dimension
img_dim = len(image.shape)
if img_dim < 2 or img_dim > 3:
raise ValueError('Undefined transform for dim: %d' % (img_dim,))
# Obtain grid-to-world transform for sampling grid
if sampling_grid2world is None:
if apply_inverse:
sampling_grid2world = self.codomain_grid2world
else:
sampling_grid2world = self.domain_grid2world
if sampling_grid2world is None:
sampling_grid2world = np.eye(dim + 1)
# Obtain world-to-grid transform for input image
if image_grid2world is None:
if apply_inverse:
image_grid2world = self.domain_grid2world
else:
image_grid2world = self.codomain_grid2world
if image_grid2world is None:
image_grid2world = np.eye(dim + 1)
image_world2grid = npl.inv(image_grid2world)
# Compute the transform from sampling grid to input image grid
if apply_inverse:
aff = self.affine_inv
else:
aff = self.affine
if (aff is None) or resample_only:
comp = image_world2grid.dot(sampling_grid2world)
else:
comp = image_world2grid.dot(aff.dot(sampling_grid2world))
# Transform the input image
if interp == 'linear':
image = image.astype(np.float64)
transformed = _transform_method[(dim, interp)](image, shape, comp)
return transformed
def transform(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False):
""" Transforms the input image from co-domain to domain space
By default, the transformed image is sampled at a grid defined by
`self.domain_shape` and `self.domain_grid2world`. If such
information was not provided then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.domain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
            `self.domain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
Returns
-------
transformed : array, shape `sampling_grid_shape` or
            `self.domain_shape`
the transformed image, sampled at the requested grid
"""
transformed = self._apply_transform(image, interp, image_grid2world,
sampling_grid_shape,
sampling_grid2world,
resample_only,
apply_inverse=False)
return np.array(transformed)
def transform_inverse(self, image, interp='linear', image_grid2world=None,
sampling_grid_shape=None, sampling_grid2world=None,
resample_only=False):
""" Transforms the input image from domain to co-domain space
By default, the transformed image is sampled at a grid defined by
`self.codomain_shape` and `self.codomain_grid2world`. If such
information was not provided then `sampling_grid_shape` is mandatory.
Parameters
----------
image : array, shape (X, Y) or (X, Y, Z)
the image to be transformed
interp : string, either 'linear' or 'nearest'
the type of interpolation to be used, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with `image`.
If None (the default), then the grid-to-world transform is assumed
to be the identity.
sampling_grid_shape : sequence, shape (dim,), optional
the shape of the grid where the transformed image must be sampled.
If None (the default), then `self.codomain_shape` is used instead
(which must have been set at initialization, otherwise an exception
will be raised).
sampling_grid2world : array, shape (dim + 1, dim + 1), optional
the grid-to-world transform associated with the sampling grid
(specified by `sampling_grid_shape`, or by default
`self.codomain_shape`). If None (the default), then the
grid-to-world transform is assumed to be the identity.
resample_only : Boolean, optional
If False (the default) the affine transform is applied normally.
If True, then the affine transform is not applied, and the input
image is just re-sampled on the domain grid of this transform.
Returns
-------
transformed : array, shape `sampling_grid_shape` or
`self.codomain_shape`
the transformed image, sampled at the requested grid
"""
transformed = self._apply_transform(image, interp, image_grid2world,
sampling_grid_shape,
sampling_grid2world,
resample_only,
apply_inverse=True)
return np.array(transformed)
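# A minimal usage sketch of `AffineMap`, tying together the resampling
# behaviour described in the docstrings above.  It is illustrative only and
# not part of the original API; the grid shapes, the one-voxel translation
# and the singular matrix below are arbitrary assumptions.
def _example_affine_map_usage():
    moving = np.zeros((6, 6), dtype=np.float64)
    moving[2:4, 2:4] = 1.0
    # Homogeneous 2D affine: translate by one voxel along each axis.
    shift = np.array([[1.0, 0.0, 1.0],
                      [0.0, 1.0, 1.0],
                      [0.0, 0.0, 1.0]])
    affine_map = AffineMap(shift,
                           domain_grid_shape=moving.shape,
                           domain_grid2world=np.eye(3),
                           codomain_grid_shape=moving.shape,
                           codomain_grid2world=np.eye(3))
    # Resample the moving (codomain) image onto the domain grid ...
    warped = affine_map.transform(moving, interp='linear')
    # ... and map it back with the inverse transform.
    restored = affine_map.transform_inverse(warped, interp='nearest')
    # Non-invertible matrices are rejected when the map is (re)configured.
    try:
        affine_map.set_affine(np.zeros((3, 3)))
    except AffineInversionError:
        pass
    return warped, restored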
class MutualInformationMetric(object):
def __init__(self, nbins=32, sampling_proportion=None):
r""" Initializes an instance of the Mutual Information metric
This class implements the methods required by Optimizer to drive the
registration process.
Parameters
----------
nbins : int, optional
the number of bins to be used for computing the intensity
histograms. The default is 32.
sampling_proportion : None or float in interval (0, 1], optional
There are two types of sampling: dense and sparse. Dense sampling
uses all voxels for estimating the (joint and marginal) intensity
histograms, while sparse sampling uses a subset of them. If
`sampling_proportion` is None, then dense sampling is
used. If `sampling_proportion` is a floating point value in (0,1]
then sparse sampling is used, where `sampling_proportion`
specifies the proportion of voxels to be used. The default is
None.
Notes
-----
Since we use linear interpolation, images are not, in general,
differentiable at exact voxel coordinates, but they are differentiable
between voxel coordinates. When using sparse sampling, selected voxels
are slightly moved by adding a small random displacement within one
voxel to prevent sampling points from being located exactly at voxel
coordinates. When using dense sampling, this random displacement is
not applied.
"""
self.histogram = ParzenJointHistogram(nbins)
self.sampling_proportion = sampling_proportion
self.metric_val = None
self.metric_grad = None
def setup(self, transform, static, moving, static_grid2world=None,
moving_grid2world=None, starting_affine=None):
r""" Prepares the metric to compute intensity densities and gradients
The histograms will be setup to compute probability densities of
intensities within the minimum and maximum values of `static` and
`moving`
Parameters
----------
transform: instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
static : array, shape (S, R, C) or (R, C)
static image
moving : array, shape (S', R', C') or (R', C')
moving image. The dimensions of the static (S, R, C) and moving
(S', R', C') images do not need to be the same.
static_grid2world : array (dim+1, dim+1), optional
the grid-to-space transform of the static image. The default is
None, implying the transform is the identity.
        moving_grid2world : array (dim+1, dim+1), optional
            the grid-to-space transform of the moving image. The default is
            None, implying the transform is the identity.
starting_affine : array, shape (dim+1, dim+1), optional
the pre-aligning matrix (an affine transform) that roughly aligns
the moving image towards the static image. If None, no
pre-alignment is performed. If a pre-alignment matrix is available,
it is recommended to provide this matrix as `starting_affine`
instead of manually transforming the moving image to reduce
interpolation artifacts. The default is None, implying no
pre-alignment is performed.
"""
n = transform.get_number_of_parameters()
self.metric_grad = np.zeros(n, dtype=np.float64)
self.dim = len(static.shape)
if moving_grid2world is None:
moving_grid2world = np.eye(self.dim + 1)
if static_grid2world is None:
static_grid2world = np.eye(self.dim + 1)
self.transform = transform
self.static = np.array(static).astype(np.float64)
self.moving = np.array(moving).astype(np.float64)
self.static_grid2world = static_grid2world
self.static_world2grid = npl.inv(static_grid2world)
self.moving_grid2world = moving_grid2world
self.moving_world2grid = npl.inv(moving_grid2world)
self.static_direction, self.static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
self.moving_direction, self.moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
self.starting_affine = starting_affine
P = np.eye(self.dim + 1)
if self.starting_affine is not None:
P = self.starting_affine
self.affine_map = AffineMap(P, static.shape, static_grid2world,
moving.shape, moving_grid2world)
if self.dim == 2:
self.interp_method = vf.interpolate_scalar_2d
else:
self.interp_method = vf.interpolate_scalar_3d
if self.sampling_proportion is None:
self.samples = None
self.ns = 0
else:
k = int(np.ceil(1.0 / self.sampling_proportion))
shape = np.array(static.shape, dtype=np.int32)
self.samples = sample_domain_regular(k, shape, static_grid2world)
self.samples = np.array(self.samples)
self.ns = self.samples.shape[0]
# Add a column of ones (homogeneous coordinates)
self.samples = np.hstack((self.samples, np.ones(self.ns)[:, None]))
if self.starting_affine is None:
self.samples_prealigned = self.samples
else:
self.samples_prealigned =\
self.starting_affine.dot(self.samples.T).T
# Sample the static image
static_p = self.static_world2grid.dot(self.samples.T).T
static_p = static_p[..., :self.dim]
self.static_vals, inside = self.interp_method(static, static_p)
self.static_vals = np.array(self.static_vals, dtype=np.float64)
self.histogram.setup(self.static, self.moving)
def _update_histogram(self):
r""" Updates the histogram according to the current affine transform
The current affine transform is given by `self.affine_map`, which
must be set before calling this method.
Returns
-------
static_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, C) if dense sampling
the intensity values corresponding to the static image used to
update the histogram. If sparse sampling is being used, then
it is simply a sequence of scalars, obtained by sampling the static
image at the `n` sampling points. If dense sampling is being used,
then the intensities are given directly by the static image,
whose shape is (S, R, C) in the 3D case or (R, C) in the 2D case.
moving_values: array, shape(n,) if sparse sampling is being used,
array, shape(S, R, C) or (R, C) if dense sampling
the intensity values corresponding to the moving image used to
update the histogram. If sparse sampling is being used, then
it is simply a sequence of scalars, obtained by sampling the moving
image at the `n` sampling points (mapped to the moving space by the
current affine transform). If dense sampling is being used,
            then the intensities are given by the moving image linearly
transformed towards the static image by the current affine, which
results in an image of the same shape as the static image.
"""
static_values = None
moving_values = None
if self.sampling_proportion is None: # Dense case
static_values = self.static
moving_values = self.affine_map.transform(self.moving)
self.histogram.update_pdfs_dense(static_values, moving_values)
else: # Sparse case
sp_to_moving = self.moving_world2grid.dot(self.affine_map.affine)
pts = sp_to_moving.dot(self.samples.T).T # Points on moving grid
pts = pts[..., :self.dim]
self.moving_vals, inside = self.interp_method(self.moving, pts)
self.moving_vals = np.array(self.moving_vals)
static_values = self.static_vals
moving_values = self.moving_vals
self.histogram.update_pdfs_sparse(static_values, moving_values)
return static_values, moving_values
def _update_mutual_information(self, params, update_gradient=True):
r""" Updates marginal and joint distributions and the joint gradient
The distributions are updated according to the static and transformed
images. The transformed image is precisely the moving image after
transforming it by the transform defined by the `params` parameters.
The gradient of the joint PDF is computed only if update_gradient
is True.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
update_gradient : Boolean, optional
if True, the gradient of the joint PDF will also be computed,
otherwise, only the marginal and joint PDFs will be computed.
The default is True.
"""
# Get the matrix associated with the `params` parameter vector
current_affine = self.transform.param_to_matrix(params)
# Get the static-to-prealigned matrix (only needed for the MI gradient)
static2prealigned = self.static_grid2world
if self.starting_affine is not None:
current_affine = current_affine.dot(self.starting_affine)
static2prealigned = self.starting_affine.dot(static2prealigned)
self.affine_map.set_affine(current_affine)
# Update the histogram with the current joint intensities
static_values, moving_values = self._update_histogram()
H = self.histogram # Shortcut to `self.histogram`
grad = None # Buffer to write the MI gradient into (if needed)
if update_gradient:
grad = self.metric_grad
# Compute the gradient of the joint PDF w.r.t. parameters
if self.sampling_proportion is None: # Dense case
# Compute the gradient of moving img. at physical points
# associated with the >>static image's grid<< cells
# The image gradient must be eval. at current moved points
grid_to_world = current_affine.dot(self.static_grid2world)
mgrad, inside = vf.gradient(self.moving,
self.moving_world2grid,
self.moving_spacing,
self.static.shape,
grid_to_world)
# The Jacobian must be evaluated at the pre-aligned points
H.update_gradient_dense(params, self.transform, static_values,
moving_values, static2prealigned, mgrad)
else: # Sparse case
# Compute the gradient of moving at the sampling points
# which are already given in physical space coordinates
pts = current_affine.dot(self.samples.T).T # Moved points
mgrad, inside = vf.sparse_gradient(self.moving,
self.moving_world2grid,
self.moving_spacing,
pts)
# The Jacobian must be evaluated at the pre-aligned points
pts = self.samples_prealigned[..., :self.dim]
H.update_gradient_sparse(params, self.transform, static_values,
moving_values, pts, mgrad)
# Call the cythonized MI computation with self.histogram fields
self.metric_val = compute_parzen_mi(H.joint, H.joint_grad,
H.smarginal, H.mmarginal,
grad)
def distance(self, params):
r""" Numeric value of the negative Mutual Information
We need to change the sign so we can use standard minimization
algorithms.
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
"""
try:
self._update_mutual_information(params, False)
except AffineInversionError:
return np.inf
return -1 * self.metric_val
def gradient(self, params):
r""" Numeric value of the metric's gradient at the given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except AffineInversionError:
return 0 * self.metric_grad
return -1 * self.metric_grad
def distance_and_gradient(self, params):
r""" Numeric value of the metric and its gradient at given parameters
Parameters
----------
params : array, shape (n,)
the parameter vector of the transform currently used by the metric
(the transform name is provided when self.setup is called), n is
the number of parameters of the transform
Returns
-------
neg_mi : float
the negative mutual information of the input images after
transforming the moving image by the currently set transform
with `params` parameters
neg_mi_grad : array, shape (n,)
the gradient of the negative Mutual Information
"""
try:
self._update_mutual_information(params, True)
except AffineInversionError:
return np.inf, 0 * self.metric_grad
return -1 * self.metric_val, -1 * self.metric_grad
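# Illustrative sketch (not part of the original API): because `distance` returns
# the *negative* mutual information, any standard minimizer can drive it directly.
# Assuming a metric that has already been configured via `metric.setup(...)` and a
# starting parameter vector `params0` (both placeholders, not objects defined in
# this module), the value/gradient pair plugs into scipy.optimize like this:
#
#     from scipy.optimize import minimize
#     res = minimize(metric.distance_and_gradient, x0=params0,
#                    jac=True, method='L-BFGS-B')
#
# This mirrors what the Optimizer wrapper used by AffineRegistration below does
# internally; it is only a sketch of the calling convention.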
class AffineRegistration(object):
def __init__(self,
metric=None,
level_iters=None,
sigmas=None,
factors=None,
method='L-BFGS-B',
ss_sigma_factor=None,
options=None):
r""" Initializes an instance of the AffineRegistration class
Parameters
----------
metric : None or object, optional
an instance of a metric. The default is None, implying
the Mutual Information metric with default settings.
level_iters : sequence, optional
the number of iterations at each scale of the scale space.
            `level_iters[0]` corresponds to the coarsest scale,
            `level_iters[-1]` to the finest. By default, a 3-level scale
            space with the iteration sequence [10000, 1000, 100] will be
            used.
sigmas : sequence of floats, optional
custom smoothing parameter to build the scale space (one parameter
for each scale). By default, the sequence of sigmas will be
[3, 1, 0].
factors : sequence of floats, optional
custom scale factors to build the scale space (one factor for each
scale). By default, the sequence of factors will be [4, 2, 1].
method : string, optional
optimization method to be used. If Scipy version < 0.12, then
only L-BFGS-B is available. Otherwise, `method` can be any
gradient-based method available in `dipy.core.Optimize`: CG, BFGS,
Newton-CG, dogleg or trust-ncg.
The default is 'L-BFGS-B'.
ss_sigma_factor : float, optional
If None, this parameter is not used and an isotropic scale
space with the given `factors` and `sigmas` will be built.
If not None, an anisotropic scale space will be used by
automatically selecting the smoothing sigmas along each axis
according to the voxel dimensions of the given image.
The `ss_sigma_factor` is used to scale the automatically computed
sigmas. For example, in the isotropic case, the sigma of the
kernel will be $factor * (2 ^ i)$ where
$i = 1, 2, ..., n_scales - 1$ is the scale (the finest resolution
image $i=0$ is never smoothed). The default is None.
options : dict, optional
extra optimization options. The default is None, implying
no extra options are passed to the optimizer.
"""
self.metric = metric
if self.metric is None:
self.metric = MutualInformationMetric()
if level_iters is None:
level_iters = [10000, 1000, 100]
self.level_iters = level_iters
self.levels = len(level_iters)
if self.levels == 0:
raise ValueError('The iterations sequence cannot be empty')
self.options = options
self.method = method
if ss_sigma_factor is not None:
self.use_isotropic = False
self.ss_sigma_factor = ss_sigma_factor
else:
self.use_isotropic = True
if factors is None:
factors = [4, 2, 1]
if sigmas is None:
sigmas = [3, 1, 0]
self.factors = factors
self.sigmas = sigmas
self.verbosity = VerbosityLevels.STATUS
def _init_optimizer(self, static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine):
r"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization.
moving : array, shape (S', R', C') or (R', C')
the image to be used as "moving" during optimization. The
dimensions of the static (S, R, C) and moving (S', R', C') images
do not need to be the same.
transform : instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
params0 : array, shape (n,)
parameters from which to start the optimization. If None, the
optimization will start at the identity transform. n is the
number of parameters of the specified transformation.
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated with the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated with the moving image
starting_affine : string, or matrix, or None
If string:
'mass': align centers of gravity
'voxel-origin': align physical coordinates of voxel (0,0,0)
'centers': align physical coordinates of central voxels
If matrix:
array, shape (dim+1, dim+1)
If None:
Start from identity
"""
self.dim = len(static.shape)
self.transform = transform
n = transform.get_number_of_parameters()
self.nparams = n
if params0 is None:
params0 = self.transform.get_identity_parameters()
self.params0 = params0
if starting_affine is None:
self.starting_affine = np.eye(self.dim + 1)
elif starting_affine == 'mass':
affine_map = transform_centers_of_mass(static,
static_grid2world,
moving,
moving_grid2world)
self.starting_affine = affine_map.affine
elif starting_affine == 'voxel-origin':
affine_map = transform_origins(static, static_grid2world,
moving, moving_grid2world)
self.starting_affine = affine_map.affine
elif starting_affine == 'centers':
affine_map = transform_geometric_centers(static,
static_grid2world,
moving,
moving_grid2world)
self.starting_affine = affine_map.affine
elif (isinstance(starting_affine, np.ndarray) and
starting_affine.shape >= (self.dim, self.dim + 1)):
self.starting_affine = starting_affine
else:
raise ValueError('Invalid starting_affine matrix')
# Extract information from affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
static = ((static.astype(np.float64) - static.min()) /
(static.max() - static.min()))
moving = ((moving.astype(np.float64) - moving.min()) /
(moving.max() - moving.min()))
# Build the scale space of the input images
if self.use_isotropic:
self.moving_ss = IsotropicScaleSpace(moving, self.factors,
self.sigmas,
moving_grid2world,
moving_spacing, False)
self.static_ss = IsotropicScaleSpace(static, self.factors,
self.sigmas,
static_grid2world,
static_spacing, False)
else:
self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
moving_spacing, self.ss_sigma_factor,
False)
self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
static_spacing, self.ss_sigma_factor,
False)
def optimize(self, static, moving, transform, params0,
static_grid2world=None, moving_grid2world=None,
starting_affine=None):
r''' Starts the optimization process
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization.
moving : array, shape (S', R', C') or (R', C')
the image to be used as "moving" during optimization. It is
necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the
'starting_affine' matrix
transform : instance of Transform
the transformation with respect to whose parameters the gradient
must be computed
params0 : array, shape (n,)
parameters from which to start the optimization. If None, the
optimization will start at the identity transform. n is the
number of parameters of the specified transformation.
static_grid2world : array, shape (dim+1, dim+1), optional
the voxel-to-space transformation associated with the static
image. The default is None, implying the transform is the
identity.
moving_grid2world : array, shape (dim+1, dim+1), optional
the voxel-to-space transformation associated with the moving
image. The default is None, implying the transform is the
identity.
starting_affine : string, or matrix, or None, optional
If string:
'mass': align centers of gravity
'voxel-origin': align physical coordinates of voxel (0,0,0)
'centers': align physical coordinates of central voxels
If matrix:
array, shape (dim+1, dim+1).
If None:
Start from identity.
The default is None.
Returns
-------
affine_map : instance of AffineMap
            the resulting affine transformation
'''
self._init_optimizer(static, moving, transform, params0,
static_grid2world, moving_grid2world,
starting_affine)
del starting_affine # Now we must refer to self.starting_affine
# Multi-resolution iterations
original_static_shape = self.static_ss.get_image(0).shape
original_static_grid2world = self.static_ss.get_affine(0)
original_moving_shape = self.moving_ss.get_image(0).shape
original_moving_grid2world = self.moving_ss.get_affine(0)
affine_map = AffineMap(None,
original_static_shape,
original_static_grid2world,
original_moving_shape,
original_moving_grid2world)
for level in range(self.levels - 1, -1, -1):
self.current_level = level
max_iter = self.level_iters[-1 - level]
if self.verbosity >= VerbosityLevels.STATUS:
print('Optimizing level %d [max iter: %d]' % (level, max_iter))
# Resample the smooth static image to the shape of this level
smooth_static = self.static_ss.get_image(level)
current_static_shape = self.static_ss.get_domain_shape(level)
current_static_grid2world = self.static_ss.get_affine(level)
current_affine_map = AffineMap(None,
current_static_shape,
current_static_grid2world,
original_static_shape,
original_static_grid2world)
current_static = current_affine_map.transform(smooth_static)
# The moving image is full resolution
current_moving_grid2world = original_moving_grid2world
current_moving = self.moving_ss.get_image(level)
# Prepare the metric for iterations at this resolution
self.metric.setup(transform, current_static, current_moving,
current_static_grid2world,
current_moving_grid2world, self.starting_affine)
# Optimize this level
if self.options is None:
self.options = {'gtol': 1e-4,
'disp': False}
if self.method == 'L-BFGS-B':
self.options['maxfun'] = max_iter
else:
self.options['maxiter'] = max_iter
if SCIPY_LESS_0_12:
# Older versions don't expect value and gradient from
# the same function
opt = Optimizer(self.metric.distance, self.params0,
method=self.method, jac=self.metric.gradient,
options=self.options)
else:
opt = Optimizer(self.metric.distance_and_gradient, self.params0,
method=self.method, jac=True,
options=self.options)
params = opt.xopt
# Update starting_affine matrix with optimal parameters
T = self.transform.param_to_matrix(params)
self.starting_affine = T.dot(self.starting_affine)
# Start next iteration at identity
self.params0 = self.transform.get_identity_parameters()
affine_map.set_affine(self.starting_affine)
return affine_map
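# Illustrative usage sketch (assumptions, not code from this file: `static` and
# `moving` are image arrays with grid-to-world matrices `static_g2w` and
# `moving_g2w`, and TranslationTransform3D is available from
# dipy.align.transforms):
#
#     from dipy.align.transforms import TranslationTransform3D
#     affreg = AffineRegistration(level_iters=[10000, 1000, 100],
#                                 sigmas=[3.0, 1.0, 0.0],
#                                 factors=[4, 2, 1])
#     translation = affreg.optimize(static, moving, TranslationTransform3D(),
#                                   params0=None,
#                                   static_grid2world=static_g2w,
#                                   moving_grid2world=moving_g2w,
#                                   starting_affine='mass')
#     moved = translation.transform(moving)
#
# The result is an AffineMap; its `transform` method resamples the moving image
# onto the static grid. This is only a sketch of the intended workflow.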
def align_centers_of_mass(static, static_grid2world,
moving, moving_grid2world):
msg = "This function is deprecated please use"
msg += " dipy.align.imaffine.transform_centers_of_mass instead."
warn(msg)
return transform_centers_of_mass(static, static_grid2world,
moving, moving_grid2world)
def align_geometric_centers(static, static_grid2world,
moving, moving_grid2world):
msg = "This function is deprecated please use"
msg += " dipy.align.imaffine.transform_geometric_centers instead."
warn(msg)
return transform_geometric_centers(static, static_grid2world,
moving, moving_grid2world)
def align_origins(static, static_grid2world,
moving, moving_grid2world):
msg = "This function is deprecated please use"
msg += " dipy.align.imaffine.transform_origins instead."
warn(msg)
return transform_origins(static, static_grid2world,
moving, moving_grid2world)
def transform_centers_of_mass(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the center of mass of the input images
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the center of mass of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = ndimage.measurements.center_of_mass(np.array(static))
c_static = static_grid2world.dot(c_static+(1,))
c_moving = ndimage.measurements.center_of_mass(np.array(moving))
c_moving = moving_grid2world.dot(c_moving+(1,))
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
def transform_geometric_centers(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the geometric center of the input images
With "geometric center" of a volume we mean the physical coordinates of
its central voxel
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the geometric center of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = tuple((np.array(static.shape, dtype=np.float64)) * 0.5)
c_static = static_grid2world.dot(c_static+(1,))
c_moving = tuple((np.array(moving.shape, dtype=np.float64)) * 0.5)
c_moving = moving_grid2world.dot(c_moving+(1,))
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
def transform_origins(static, static_grid2world,
moving, moving_grid2world):
r""" Transformation to align the origins of the input images
With "origin" of a volume we mean the physical coordinates of
voxel (0,0,0)
Parameters
----------
static : array, shape (S, R, C)
static image
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the static image
moving : array, shape (S, R, C)
moving image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of the moving image
Returns
-------
affine_map : instance of AffineMap
the affine transformation (translation only, in this case) aligning
the origin of the moving image towards the one of the static
image
"""
dim = len(static.shape)
if static_grid2world is None:
static_grid2world = np.eye(dim + 1)
if moving_grid2world is None:
moving_grid2world = np.eye(dim + 1)
c_static = static_grid2world[:dim, dim]
c_moving = moving_grid2world[:dim, dim]
transform = np.eye(dim + 1)
transform[:dim, dim] = (c_moving - c_static)[:dim]
affine_map = AffineMap(transform,
static.shape, static_grid2world,
moving.shape, moving_grid2world)
return affine_map
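# Illustrative sketch: the three helpers above all return translation-only
# AffineMap objects, which can be used directly or passed (via their `affine`
# attribute or the corresponding `starting_affine` string) to
# AffineRegistration.optimize. Assuming the same placeholder inputs as above:
#
#     c_of_mass = transform_centers_of_mass(static, static_g2w,
#                                           moving, moving_g2w)
#     roughly_aligned = c_of_mass.transform(moving)  # resampled on static grid
#
# Variable names here are placeholders, not objects defined in this module.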
| demianw/dipy | dipy/align/imaffine.py | Python | bsd-3-clause | 52,941 | ["Gaussian"] | 2b7a997035b86a7c88821374bf887c48536c0980302c8501e805fe5d913e35f5 |
#!/usr/bin/env python
import os
import sys
from subprocess import check_output
from distutils import log
from distutils.core import Command
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from setuptools.command.develop import develop as DevelopCommand
from setuptools.command.sdist import sdist as SDistCommand
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
execfile('strongpoc/version.py')
with open('requirements.txt') as requirements:
required = requirements.read().splitlines()
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside of the test run the eggs aren't loaded
log.info('Running python tests...')
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
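# Hypothetical usage note: with the `pytest-args=` option declared above, the
# test command can forward arguments to py.test, e.g.
#     python setup.py test -a "-k smoke -x"
# ('-a' is the short form of --pytest-args; the arguments shown are only an
# example, not values required by this project).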
class DevelopWithBuildStatic(DevelopCommand):
def install_for_development(self):
self.run_command('build_static')
return DevelopCommand.install_for_development(self)
class SDistWithBuildStatic(SDistCommand):
def run(self):
self.run_command('build_static')
SDistCommand.run(self)
class BuildStatic(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
log.info('running [npm install --quiet]')
check_output(['npm', 'install', '--quiet'], cwd=ROOT)
log.info('running [gulp clean]')
check_output([os.path.join(ROOT, 'node_modules', '.bin', 'gulp'), 'clean'], cwd=ROOT)
log.info('running [gulp build]')
check_output([os.path.join(ROOT, 'node_modules', '.bin', 'gulp'), 'build'], cwd=ROOT)
kwargs = {
'name': 'strongpoc',
'version': str(__version__),
'packages': find_packages(exclude=['tests*']),
'include_package_data': True,
'description': 'Strong Point of Contact (Team contact management)',
'author': 'Digant C Kasundra',
'maintainer': 'Digant C Kasundra',
'author_email': '[email protected]',
'maintainer_email': '[email protected]',
'license': 'Apache',
'install_requires': required,
'url': 'https://github.com/dropbox/strongpoc',
'tests_require': ['pytest'],
'cmdclass': {
'test': PyTest,
# 'build_static': BuildStatic,
# 'develop': DevelopWithBuildStatic,
# 'sdist': SDistWithBuildStatic,
},
'entry_points': """
[console_scripts]
strongpoc=strongpoc.cli:main
strongpoc-srv=strongpoc.server:main
""",
'classifiers': [
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
]
}
setup(**kwargs)
| dropbox/strongpoc | setup.py | Python | apache-2.0 | 3,108 | ["GULP"] | 04aea004aa3c7a154611abb6495f997b936ab838d46ec90d6882a987ada31ff4 |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.layers
from tensorflow.contrib.layers.python import layers as tf_layers
from models.conv_lstm import basic_conv_lstm_cell
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
FC_LAYER_SIZE = 512
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def encoder_model(frames, sequence_length, initializer, scope='encoder', fc_conv_layer=False):
"""
  Args:
    frames: 5D array of batch with videos - shape (batch_size, num_frames, frame_width, frame_height, num_channels)
    sequence_length: number of frames that shall be encoded
    scope: tensorflow variable scope name
    initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)
    fc_conv_layer: if True, a fully convolutional layer (8x8x16 -> 1x1xFC_LAYER_SIZE) is added at the end of the encoder
  Returns:
    hidden_repr: hidden state of the highest ConvLSTM layer, or the 1x1xFC_LAYER_SIZE latent tensor if fc_conv_layer is True
"""
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
for i in range(sequence_length):
frame = frames[:,i,:,:,:]
reuse = (i > 0)
with tf.variable_scope(scope, reuse=reuse):
#LAYER 1: conv1
conv1 = slim.layers.conv2d(frame, 16, [5, 5], stride=2, scope='conv1', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm1'})
#LAYER 2: convLSTM1
hidden1, lstm_state1 = basic_conv_lstm_cell(conv1, lstm_state1, 16, initializer, filter_size=5, scope='convlstm1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
#LAYER 3: conv2
conv2 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [5, 5], stride=2, scope='conv2', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm3'})
#LAYER 4: convLSTM2
hidden2, lstm_state2 = basic_conv_lstm_cell(conv2, lstm_state2, 16, initializer, filter_size=5, scope='convlstm2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm4')
#LAYER 5: conv3
conv3 = slim.layers.conv2d(hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv3', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm5'})
#LAYER 6: convLSTM3
hidden3, lstm_state3 = basic_conv_lstm_cell(conv3, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm6')
#LAYER 7: conv4
conv4 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,
normalizer_params={'scope': 'layer_norm7'})
#LAYER 8: convLSTM4 (8x8 featuremap size)
hidden4, lstm_state4 = basic_conv_lstm_cell(conv4, lstm_state4, 16, initializer, filter_size=3, scope='convlstm4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm8')
#LAYER 9: Fully Convolutional Layer (8x8x16 --> 1x1xFC_LAYER_SIZE)
if fc_conv_layer:
fc_conv = slim.layers.conv2d(hidden4, FC_LAYER_SIZE, [8,8], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)
hidden_repr = fc_conv
else:
hidden_repr = hidden4
return hidden_repr
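# Shape flow of the encoder above (assuming 128x128 input frames, which is what
# the decoder's final 64x64 -> 128x128 upconv implies): each of the four strided
# conv layers halves the spatial resolution, 128 -> 64 -> 32 -> 16 -> 8, and the
# optional fully convolutional layer collapses the 8x8x16 feature map into a
# 1x1xFC_LAYER_SIZE latent vector.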
def decoder_model(hidden_repr, sequence_length, initializer, num_channels=3, scope='decoder', fc_conv_layer=False):
"""
  Args:
    hidden_repr: Tensor of the latent space representation
    sequence_length: number of frames that shall be decoded from the hidden_repr
    num_channels: number of channels for generated frames
    initializer: specifies the initialization type of the weights
    fc_conv_layer: if True, hidden_repr is assumed to be a 1x1xdepth tensor and a fully convolutional (transposed) layer is added at the start of the decoder
  Returns:
    frame_gen: list of generated frames (Tensors)
"""
frame_gen = []
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
assert (not fc_conv_layer) or (hidden_repr.get_shape()[1] == hidden_repr.get_shape()[2] == 1)
for i in range(sequence_length):
reuse = (i > 0) #reuse variables (recurrence) after first time step
with tf.variable_scope(scope, reuse=reuse):
#Fully Convolutional Layer (1x1xFC_LAYER_SIZE -> 8x8x16)
if fc_conv_layer:
fc_conv = slim.layers.conv2d_transpose(hidden_repr, 16, [8, 8], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)
hidden1_input = fc_conv
else:
hidden1_input = hidden_repr
#LAYER 1: convLSTM1
hidden1, lstm_state1 = basic_conv_lstm_cell(hidden1_input, lstm_state1, 16, initializer, filter_size=3, scope='convlstm1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm1')
#LAYER 2: upconv1 (8x8 -> 16x16)
upconv1 = slim.layers.conv2d_transpose(hidden1, hidden1.get_shape()[3], 3, stride=2, scope='upconv1', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm2'})
#LAYER 3: convLSTM2
hidden2, lstm_state2 = basic_conv_lstm_cell(upconv1, lstm_state2, 16, initializer, filter_size=3, scope='convlstm2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
#LAYER 4: upconv2 (16x16 -> 32x32)
upconv2 = slim.layers.conv2d_transpose(hidden2, hidden2.get_shape()[3], 3, stride=2, scope='upconv2', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm4'})
#LAYER 5: convLSTM3
hidden3, lstm_state3 = basic_conv_lstm_cell(upconv2, lstm_state3, 16, initializer, filter_size=5, scope='convlstm3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm5')
# LAYER 6: upconv3 (32x32 -> 64x64)
upconv3 = slim.layers.conv2d_transpose(hidden3, hidden3.get_shape()[3], 5, stride=2, scope='upconv3', weights_initializer=initializer,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm6'})
#LAYER 7: convLSTM4
hidden4, lstm_state4 = basic_conv_lstm_cell(upconv3, lstm_state4, 16, initializer, filter_size=5, scope='convlstm4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm7')
#Layer 8: upconv4 (64x64 -> 128x128)
upconv4 = slim.layers.conv2d_transpose(hidden4, num_channels, 5, stride=2, scope='upconv4', weights_initializer=initializer)
frame_gen.append(upconv4)
assert len(frame_gen)==sequence_length
return frame_gen
def composite_model(frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5, uniform_init=True, num_channels=3, fc_conv_layer=True):
"""
  Args:
    frames: 5D array of batch with videos - shape (batch_size, num_frames, frame_width, frame_height, num_channels)
    encoder_len: number of frames that shall be encoded
    decoder_future_len: number of future frames that shall be decoded from the hidden_repr
    decoder_reconst_len: number of input frames that shall be reconstructed from the hidden_repr
    num_channels: number of channels for generated frames
    fc_conv_layer: indicates whether a fully convolutional layer shall be added between encoder and decoder
    uniform_init: specifies if the weight initialization should be drawn from a gaussian or a uniform distribution (default: uniform)
  Returns:
    frames_pred, frames_reconst: lists of generated future and reconstructed frames (Tensors)
    hidden_repr: the shared latent representation
"""
  assert all([n > 0 for n in [encoder_len, decoder_future_len, decoder_reconst_len]])
initializer = tf_layers.xavier_initializer(uniform=uniform_init)
hidden_repr = encoder_model(frames, encoder_len, initializer, fc_conv_layer=fc_conv_layer)
frames_pred = decoder_model(hidden_repr, decoder_future_len, initializer, num_channels=num_channels,
scope='decoder_pred', fc_conv_layer=fc_conv_layer)
frames_reconst = decoder_model(hidden_repr, decoder_reconst_len, initializer, num_channels=num_channels,
scope='decoder_reconst', fc_conv_layer=fc_conv_layer)
return frames_pred, frames_reconst, hidden_repr
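# Illustrative usage sketch (TensorFlow 1.x style; the batch size, sequence
# length and 128x128x3 frame shape are assumptions, not values fixed by this
# file):
#
#     frames = tf.placeholder(tf.float32, [8, 15, 128, 128, 3])
#     frames_pred, frames_reconst, hidden_repr = composite_model(
#         frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5,
#         num_channels=3, fc_conv_layer=True)
#
# `frames_pred` and `frames_reconst` are lists of per-timestep tensors produced
# by the two decoders that share the same latent representation.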
| jonasrothfuss/DeepEpisodicMemory | models/model_zoo/model_conv4_fc512.py | Python | mit | 8,430 | ["Gaussian"] | 7890aafdaebbf4dadc795fbfee848a0c69934352ce811e3297c70eaa434397de |
'''<b>Automatic spot detection</b> - Extended morphological processing: a
practical method for automatic spot detection of biological markers from
microscopic images
<hr>
This is an implementation of the Extended morphological processing, published
by Yoshitaka Kimori, Norio Baba, and Nobuhiro Morone
in 2010 in BMC Bioinformatics.
doi: 10.1186/1471-2105-11-373
This module uses morphological transformations in order to automatically detect
spots in the picture. The spots can come from Fluorescence Microscopy images or
Electron Microscopy images.
'''
#
#
# Imports from useful Python libraries
#
#
import numpy as np
import scipy.ndimage as ndimage
#
#
# Imports from CellProfiler
#
# The package aliases are the standard ones we use
# throughout the code.
#
#
import identify as cpmi
from identify import draw_outline
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
import cellprofiler.objects as cpo
import cellprofiler.cpmath as cpmath
import cellprofiler.preferences as cpp
from cellprofiler.cpmath.filter import stretch
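# Minimal standalone sketch (illustration only) of the rotational morphological
# processing used below: rotate the image, apply a grey opening with a line
# structuring element, rotate back, and keep the pixel-wise maximum over all
# orientations. The SpotAnalyzer class further down is the actual module
# implementation; it applies this operation twice (a short element to remove
# noise, then a long element to suppress the spots) before taking a top-hat
# and thresholding.
def _rotational_opening_sketch(img, n_rotations=36, line_length=3):
    opened = []
    for n in range(n_rotations):
        angle = (180.0 / n_rotations) * n
        rot = ndimage.interpolation.rotate(img, angle, reshape=True,
                                           mode="constant", cval=0)
        rot = ndimage.morphology.grey_opening(rot, size=(1, line_length))
        back = ndimage.interpolation.rotate(rot, -angle, reshape=True,
                                            mode="constant", cval=0)
        # Crop back to the original shape (rotating with reshape=True pads).
        dx = (back.shape[0] - img.shape[0]) // 2
        dy = (back.shape[1] - img.shape[1]) // 2
        opened.append(back[dx:dx + img.shape[0], dy:dy + img.shape[1]])
    return np.array(opened).max(axis=0)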
#
#
# Constants
#
# It's good programming practice to replace things like strings with
# constants if they will appear more than once in your program. That way,
# if someone wants to change the text, that text will change everywhere.
# Also, you can't misspell it by accident.
#
PP_MORPH = "Morphology Binary Opening"
PP_SIZE = "Size-based filter"
PP_GAUS = "Gaussian fitting"
PP_NONE = "No Post-Processing"
#
#
# The module class
#
# Your module should "inherit" from cellprofiler.cpmodule.CPModule.
# This means that your module will use the methods from CPModule unless
# you re-implement them. You can let CPModule do most of the work and
# implement only what you need.
#
#
class SpotAnalyzer(cpm.CPModule):
#
#
# The module starts by declaring the name that's used for display,
# the category under which it is stored and the variable revision
# number which can be used to provide backwards compatibility if
# you add user-interface functionality later.
#
#
module_name = "SpotAnalyzer"
category = "Object Processing"
variable_revision_number = 1
#
#
# create_settings is where you declare the user interface elements
# (the "settings") which the user will use to customize your module.
#
# You can look at other modules and in cellprofiler.settings for
# settings you can use.
#
#
def create_settings(self):
#
# The ImageNameSubscriber "subscribes" to all ImageNameProviders in
# prior modules. Modules before yours will put images into
# CellProfiler.
# The ImageSubscriber gives your user a list of these images
# which can then be used as inputs in your module.
#
self.input_image = cps.ImageNameSubscriber(
"Input spot image:",
doc="""This is the image that the module operates on. You can
choose any image that is made available by a prior module.
<br>
<b>SpotAnalizer</b> will detect the spots on this image.
""")
self.output_spots = cps.ObjectNameProvider(
"Output spot objects name:",
"Spots",
doc="""Enter the name that you want to call the objects identified
by this module.""")
self.apply_mask = cps.Binary(
"Constrains the detection to some Objects ?",
True,
doc="""If selected, the spots detected outside of the
input objects will be removed from the result.
This is recomended as this will avoid noise detection outside
of cell boundary.""")
self.input_object_mask = cps.ObjectNameSubscriber(
"Constrained Objects",
doc="""Objects to serve as a restriction mask.""")
self.angles = cps.Integer(
"Number of performed rotations:",
36,
minval=1,
maxval=180,
doc="""This parameter correspond to the number of rotations
performed for the rotational morphological processing.
According to the authors, 36 is the best TradeOff.
The higher the number is the better the accuracy.
But each rotation increase the computational cost.""")
self.SE_min = cps.Integer(
"Minimum Spot size:",
2,
minval=1,
maxval=100,
doc="""This parameter should be set to be smaler than the
smalest pixel size of the spots you are extracting.""")
self.SE_max = cps.Integer(
"Maximum Spot size:",
7,
minval=1,
maxval=100,
doc="""This parameter should be set to be bigger than the
biggest pixel size of the spots you are extracting.""")
self.Threshold = cps.Integer(
"Threshold:",
0,
minval=0,
maxval=100,
doc="""This parameter corespond to the Threshold used to
select the spots after the morphological transformations
were executed. It should be set to 0 to be less stringent,
and increased if too many False Discovery happens. By
increasing the value, you are reducing the False Discovery
Rate, but this may reduce the True Discovery Rate too.""")
self.post_processing = cps.Choice(
'Method to distinguish clumped objects',
[PP_NONE, PP_MORPH, PP_SIZE, PP_GAUS],
doc="""Post Processing can be useful in order to remove
persisting noise, or false postive.<br>
<ul>
<li>%(PP_MORPH)%: Method based on morphology, remove noise using
the morphology opening function.</li>
<li>%(PP_SIZE)%: Method based on the size of the objects. Any
object not in the size range will be removed.</li>
<li>%(PP_NONE)%: No Post-Processing.</li>
<li>%(PP_GAUS)%: Each Object is fitted with a gaussian.</li>
</ul>""" % globals())
self.gaussian_threshold = cps.Float(
'Correlation Threshold',
0.8,
minval=0,
maxval=1,
doc="""This value correspond to the Threshold to be applied on the
gaussian fit. When an object is fitted with a 2G gaussian
distribution, it's correlation is extracted. If the correlation
is inferior to the Threshold the spot is removed.""")
self.size_range = cps.IntegerRange(
"Allowed Spot size",
(2, 7), minval=1, doc='''
            This setting corresponds to the allowed size range of spots.
            It can be useful to remove wrongly detected spots that are
            too small or too big.''')
#
# The "settings" method tells CellProfiler about the settings you
# have in your module. CellProfiler uses the list for saving
# and restoring values for your module when it saves or loads a
# pipeline file.
#
def settings(self):
return [self.input_image, self.output_spots, self.apply_mask,
self.input_object_mask, self.angles, self.SE_max,
self.SE_min, self.Threshold, self.post_processing,
self.size_range]
#
# visible_settings tells CellProfiler which settings should be
# displayed and in what order.
#
# You don't have to implement "visible_settings" - if you delete
# visible_settings, CellProfiler will use "settings" to pick settings
# for display.
#
def visible_settings(self):
result = [self.input_image, self.output_spots, self.apply_mask]
#
# Show the user the scale only if self.wants_smoothing is checked
#
if self.apply_mask:
result += [self.input_object_mask]
result += [self.angles, self.SE_max, self.SE_min, self.Threshold,
self.post_processing]
if self.post_processing.value == PP_SIZE:
result += [self.size_range]
return result
#
# CellProfiler calls "run" on each image set in your pipeline.
# This is where you do the real work.
#
def run(self, workspace):
#
# Get the input and output image names. You need to get the .value
# because otherwise you'll get the setting object instead of
# the string name.
#
input_image_name = self.input_image.value
if self.apply_mask:
input_mask_name = self.input_object_mask.value
# output_spot_name = self.output_spots.value
#
# Get the image set. The image set has all of the images in it.
# The assert statement makes sure that it really is an image set,
# but, more importantly, it lets my editor do context-sensitive
# completion for the image set.
#
image_set = workspace.image_set
# assert isinstance(image_set, cpi.ImageSet)
#
# Get the input image object. We want a grayscale image here.
# The image set will convert a color image to a grayscale one
# and warn the user.
#
input_image = image_set.get_image(input_image_name,
must_be_grayscale=True)
#
# Get the pixels - these are a 2-d Numpy array.
#
image_pixels = input_image.pixel_data
# Getting the mask
if self.apply_mask:
object_set = workspace.object_set
# assert isinstance(object_set, cpo.ObjectSet)
objects = object_set.get_objects(input_mask_name)
input_mask = objects.segmented
else:
input_mask = None
#
# Get the smoothing parameter
#
spots = self.Spot_Extraction(
image_pixels,
mask=input_mask,
N=self.angles.value,
l_noise=self.SE_min.value,
l_spot=self.SE_max.value,
Threshold=self.Threshold.value)
labeled_spots, counts_spots = ndimage.label(spots)
#
# Post Processing
#
if self.post_processing.value == PP_MORPH:
labeled_spots_filtered = self.filter_on_morph(labeled_spots)
elif self.post_processing.value == PP_SIZE:
labeled_spots, labeled_spots_filtered = self.filter_on_size(
labeled_spots, counts_spots)
elif self.post_processing.value == PP_GAUS:
labeled_spots_filtered = self.filter_on_gaussian(image_pixels,
labeled_spots)
else:
labeled_spots_filtered = labeled_spots
labeled_spots_filtered, counts_spots_filtered = ndimage.label(
labeled_spots_filtered)
#
# Make an image object. It's nice if you tell CellProfiler
# about the parent image - the child inherits the parent's
# cropping and masking, but it's not absolutely necessary
#
# Add image measurements
objname = self.output_spots.value
measurements = workspace.measurements
cpmi.add_object_count_measurements(measurements,
objname, counts_spots_filtered)
# Add label matrices to the object set
objects = cpo.Objects()
objects.segmented = labeled_spots_filtered
objects.unedited_segmented = labeled_spots
objects.post_processed = labeled_spots_filtered
objects.parent_image = input_image
outline_image = cpmath.outline.outline(labeled_spots)
outline_image_filtered = cpmath.outline.outline(labeled_spots_filtered)
workspace.object_set.add_objects(objects, self.output_spots.value)
cpmi.add_object_location_measurements(workspace.measurements,
self.output_spots.value,
labeled_spots_filtered)
#
# Save intermediate results for display if the window frame is on
#
workspace.display_data.input_pixels = image_pixels
workspace.display_data.output_pixels = spots
workspace.display_data.outline_image = outline_image
workspace.display_data.outline_image_filtered = outline_image_filtered
def Spot_Extraction(self, IMG, mask=None, N=36, l_noise=3, l_spot=6,
Threshold=0, Noise_reduction=True):
"""N : Number of rotations to perform
l_noise : lenght of straigh line segment for Strucure element (must be
< to the spot size)
l_spot : lenght of SE for spot extraction (must be > to the spot size)
Threshold : Threshold value for spot selection"""
IMG = (IMG * 65536).astype(np.int32)
IMG_Noise_red = self.RMP(IMG, N, l_noise)
IMG_Spot = self.RMP(IMG_Noise_red, N, l_spot)
IMG_TopHat = IMG - IMG_Spot
IMG_Spots = (IMG_TopHat > Threshold).astype(np.int)
if mask is not None:
IMG_Spots = IMG_Spots * mask
return IMG_Spots
def RMP(self, IMG, N, l):
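        """Rotational Morphological Processing: rotate the image N times (in
        steps of 180/N degrees), apply a grey opening with a line structuring
        element of length l at each orientation, rotate back, crop to the
        original shape and keep the pixel-wise maximum over all orientations.
        """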
opened_images = []
for n in range(N):
angle = (180 / N) * n
IMG_rotate = ndimage.interpolation.rotate(
IMG, angle, reshape=True, mode="constant", cval=0)
IMG_Opened = ndimage.morphology.grey_opening(
IMG_rotate, size=(0, l))
IMG2 = ndimage.interpolation.rotate(
IMG_Opened, -angle, reshape=True, mode="constant", cval=0)
a = IMG.shape
b = IMG2.shape
if a != b:
x = (b[0] - a[0]) / 2
y = (b[1] - a[1]) / 2
IMG2 = IMG2[x:x + a[0], y:y + a[1]]
opened_images.append(IMG2)
return np.array(opened_images, dtype=np.int32).max(axis=0)
def filter_on_morph(self, IMG):
"""Filter the spot image based the morphology opening function."""
return ndimage.morphology.binary_opening(IMG)
def filter_on_gaussian(self, IMG, spots):
"Filter the spots based on gaussian correlation."
return spots
def filter_on_size(self, labeled_image, object_count):
""" Filter the labeled image based on the size range
labeled_image - pixel image labels
object_count - # of objects in the labeled image
returns the labeled image, and the labeled image with the
small objects removed
"""
if object_count > 0:
areas = ndimage.measurements.sum(np.ones(labeled_image.shape),
labeled_image,
np.array(range(0,
object_count + 1),
dtype=np.int32))
areas = np.array(areas, dtype=int)
min_allowed_area = np.pi * \
(self.size_range.min * self.size_range.min) / 4
max_allowed_area = np.pi * \
(self.size_range.max * self.size_range.max) / 4
# area_image has the area of the object at every pixel within the
# object
area_image = areas[labeled_image]
labeled_image[area_image < min_allowed_area] = 0
small_removed_labels = labeled_image.copy()
labeled_image[area_image > max_allowed_area] = 0
else:
small_removed_labels = labeled_image.copy()
return (labeled_image, small_removed_labels)
#
# is_interactive tells CellProfiler whether "run" uses any interactive
# GUI elements. If you return False here, CellProfiler will run your
# module on a separate thread which will make the user interface more
# responsive.
#
def is_interactive(self):
return False
#
# display lets you use matplotlib to display your results.
#
def display(self, workspace, figure):
#
# the "figure" is really the frame around the figure. You almost always
# use figure.subplot or figure.subplot_imshow to get axes to draw on
# so we pretty much ignore the figure.
#
# figure = workspace.create_or_find_figure(subplots=(2, 1))
#
figure.set_subplots((2, 1))
# Show the user the input image
#
orig_axes = figure.subplot(0, 0)
figure.subplot(1, 0, sharexy=orig_axes)
figure.subplot_imshow_grayscale(
0, 0, # show the image in the first row and column
workspace.display_data.input_pixels,
title=self.input_image.value)
#
# Show the user the final image
#
if workspace.display_data.input_pixels.ndim == 2:
# Outline the size-excluded pixels in red
outline_img = np.ndarray(
shape=(workspace.display_data.input_pixels.shape[0],
workspace.display_data.input_pixels.shape[1], 3))
outline_img[:, :, 0] = workspace.display_data.input_pixels
outline_img[:, :, 1] = workspace.display_data.input_pixels
outline_img[:, :, 2] = workspace.display_data.input_pixels
else:
outline_img = workspace.display_data.image.copy()
#
# Stretch the outline image to the full scale
#
outline_img = stretch(outline_img)
# Outline the accepted objects pixels
draw_outline(outline_img, workspace.display_data.outline_image,
cpp.get_secondary_outline_color())
# Outline the size-excluded pixels
draw_outline(outline_img,
workspace.display_data.outline_image_filtered,
cpp.get_primary_outline_color())
title = "%s outlines" % (self.output_spots.value)
figure.subplot_imshow(1, 0, outline_img, title, normalize=False)
| Xqua/SpotAnalizer | spotanalyzer.py | Python | gpl-2.0 | 17,971 | ["Gaussian"] | 2e15836d4a700735f0ac65ea48e2eb94129559589a624924c8306b13d0dccaee |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line skeleton application for YouTube Analytics API.
Usage:
$ python sample.py
You can also get help on all the command-line flags the program understands
by running:
$ python sample.py --help
"""
import argparse
import httplib2
import os
import sys
import sqlite3 as lite
import time
from datetime import datetime, timedelta
from apiclient import discovery
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client import file
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
from oauth2client.client import flow_from_clientsecrets
from array import array
# CLIENT_SECRETS is name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret. You can see the Client ID
# and Client secret on the APIs page in the Cloud Console:
# <https://cloud.google.com/console#/project/590260946072/apiui>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
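# For reference, a client_secrets.json for an installed application typically
# has the following shape (the values below are placeholders, not real
# credentials, and the exact fields come from the Developers Console export):
#
#     {
#       "installed": {
#         "client_id": "YOUR_CLIENT_ID",
#         "client_secret": "YOUR_CLIENT_SECRET",
#         "auth_uri": "https://accounts.google.com/o/oauth2/auth",
#         "token_uri": "https://accounts.google.com/o/oauth2/token",
#         "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob", "http://localhost"]
#       }
#     }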
# These OAuth 2.0 access scopes allow for read-only access to the authenticated
# user's account for both YouTube Data API resources and YouTube Analytics Data.
YOUTUBE_SCOPES = ["https://www.googleapis.com/auth/youtube.readonly",
"https://www.googleapis.com/auth/yt-analytics.readonly"]
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
YOUTUBE_ANALYTICS_API_SERVICE_NAME = "youtubeAnalytics"
YOUTUBE_ANALYTICS_API_VERSION = "v1"
REPORT_VALUE = 365
INIT_TIME = time.time()
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Developers Console
https://cloud.google.com/console
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS))
# Set up a Flow object to be used for authentication.
# Add one or more of the following scopes. PLEASE ONLY ADD THE SCOPES YOU
# NEED. For more information on using scopes please see
# <https://developers.google.com/+/best-practices>.
FLOW = client.flow_from_clientsecrets(CLIENT_SECRETS,
scope=[
'https://www.googleapis.com/auth/yt-analytics-monetary.readonly',
'https://www.googleapis.com/auth/yt-analytics.readonly',
],
message=tools.message_if_missing(CLIENT_SECRETS))
channels_metrics = {'views':'Views','comments':'Comments','likes':'Likes','subscribersGained':'Subscribers Gained','subscribersLost':'Subscribers Lost','shares':'Shares'}
def get_authenticated_services(args, account_number):
flow = flow_from_clientsecrets(CLIENT_SECRETS,
scope=" ".join(YOUTUBE_SCOPES),
message=MISSING_CLIENT_SECRETS_MESSAGE)
youtubes = []
youtubes_analytics = []
for account_num in range(account_number):
storage = Storage("%s-oauth2-%d.json" % (sys.argv[0],account_num))
credentials = storage.get()
if not credentials or credentials.invalid:
credentials = run_flow(flow, storage, args)
http = credentials.authorize(httplib2.Http())
youtubes.append(build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=http))
youtubes_analytics.append(build(YOUTUBE_ANALYTICS_API_SERVICE_NAME,
YOUTUBE_ANALYTICS_API_VERSION, http=http))
return (youtubes, youtubes_analytics)
def get_channel_id(youtube):
channels_list_response = youtube.channels().list(
mine=True,
part="id"
).execute()
return channels_list_response["items"][0]["id"]
def run_analytics_report(youtube_analytics, channel_id, options):
# Call the Analytics API to retrieve a report. For a list of available
# reports, see:
# https://developers.google.com/youtube/analytics/v1/channel_reports
analytics_query_response = youtube_analytics.reports().query(
ids="channel==%s" % channel_id,
metrics=options.metrics,
start_date=options.start_date,
end_date=options.end_date,
max_results=options.max_results,
sort=options.sort
).execute()
print "Analytics Data for Channel %s" % channel_id
for column_header in analytics_query_response.get("columnHeaders", []):
print "%-20s" % column_header["name"],
print
for row in analytics_query_response.get("rows", []):
for value in row:
print "%-20s" % value,
print
def prepare_parser(tipo):
now = datetime.now()
one_day_ago = (now - timedelta(days=1)).strftime("%Y-%m-%d")
one_year_ago = (now - timedelta(days=tipo)).strftime("%Y-%m-%d")
argparser.add_argument("--metrics", help="Report metrics",
default="views,comments,likes,shares,subscribersGained,subscribersLost")
argparser.add_argument("--start-date", default=one_year_ago,
help="Start date, in YYYY-MM-DD format")
argparser.add_argument("--end-date", default=one_day_ago,
help="End date, in YYYY-MM-DD format")
argparser.add_argument("--max-results", help="Max results", default=10)
argparser.add_argument("--sort", help="Sort order", default="-views")
def main(argv):
#Number of channels
    account_number = int(raw_input('Number of YouTube channels? '))
    tipo = int(raw_input('Number of report days? '))
prepare_parser(tipo)
args = argparser.parse_args()
try:
(youtube, youtube_analytics) = get_authenticated_services(args,
account_number)
try:
for idx, val in enumerate(youtube):
channel_id = get_channel_id(val)
run_analytics_report(youtube_analytics[idx], channel_id, args)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
except client.AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
# For more information on the YouTube Analytics API you can visit:
#
# http://developers.google.com/youtube/analytics/
#
# For more information on the YouTube Analytics API Python library surface you
# can visit:
#
# https://developers.google.com/resources/api-libraries/documentation/youtubeAnalytics/v1/python/latest/
#
# For information on the Python Client Library visit:
#
# https://developers.google.com/api-client-library/python/start/get_started
if __name__ == '__main__':
main(sys.argv)
| paradigmadigital/marketingMonitoringTool | multiple_channel_sample.py | Python | gpl-3.0 | 7,357 | ["VisIt"] | 4fdec2aafdbdd8cf64645455679c762be140abc2d792f2fd702de8a28a239c2d |
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Quantum ESPRESSO, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_QuantumESPRESSO(ConfigureMake):
"""Support for building and installing Quantum ESPRESSO."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Quantum ESPRESSO."""
extra_vars = {
'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM],
'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
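    # Illustrative only (not taken from an actual easyconfig): the two custom
    # parameters declared above would appear in an easyconfig file as, e.g.
    #     hybrid = True
    #     with_scalapack = False
    # alongside the usual ConfigureMake parameters such as `buildopts`.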
def __init__(self, *args, **kwargs):
"""Add extra config options specific to Quantum ESPRESSO."""
super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.install_subdir = "espresso-%s" % self.version
def patch_step(self):
"""Patch files from build dir (not start dir)."""
super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir)
def configure_step(self):
"""Custom configuration procedure for Quantum ESPRESSO."""
if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
self.cfg.update('configopts', '--enable-openmp')
if not self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', '--disable-parallel')
if not self.cfg['with_scalapack']:
self.cfg.update('configopts', '--without-scalapack')
repls = []
if self.toolchain.comp_family() in [toolchain.INTELCOMP]:
# set preprocessor command (-E to stop after preprocessing, -C to preserve comments)
cpp = "%s -E -C" % os.getenv('CC')
repls.append(('CPP', cpp, False))
env.setvar('CPP', cpp)
# also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran)
env.setvar('FCCPP', "%s -E" % os.getenv('CC'))
super(EB_QuantumESPRESSO, self).configure_step()
# compose list of DFLAGS (flag, value, keep_stuff)
# for guidelines, see include/defs.h.README in sources
dflags = []
comp_fam_dflags = {
toolchain.INTELCOMP: '-D__INTEL',
toolchain.GCC: '-D__GFORTRAN -D__STD_F95',
}
dflags.append(comp_fam_dflags[self.toolchain.comp_family()])
if self.toolchain.options.get('openmp', False):
libfft = os.getenv('LIBFFT_MT')
else:
libfft = os.getenv('LIBFFT')
if libfft:
if "fftw3" in libfft:
dflags.append('-D__FFTW3')
else:
dflags.append('-D__FFTW')
env.setvar('FFTW_LIBS', libfft)
if get_software_root('ACML'):
dflags.append('-D__ACML')
if self.toolchain.options.get('usempi', None):
dflags.append('-D__MPI -D__PARA')
if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
dflags.append(" -D__OPENMP")
if self.cfg['with_scalapack']:
dflags.append(" -D__SCALAPACK")
        # always include -w to suppress warnings
dflags.append('-w')
repls.append(('DFLAGS', ' '.join(dflags), False))
# complete C/Fortran compiler and LD flags
if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']:
repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True))
repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True))
# obtain library settings
libs = []
for lib in ['BLAS', 'LAPACK', 'FFT', 'SCALAPACK']:
if self.toolchain.options.get('openmp', False):
val = os.getenv('LIB%s_MT' % lib)
else:
val = os.getenv('LIB%s' % lib)
repls.append(('%s_LIBS' % lib, val, False))
libs.append(val)
libs = ' '.join(libs)
repls.append(('BLAS_LIBS_SWITCH', 'external', False))
repls.append(('LAPACK_LIBS_SWITCH', 'external', False))
repls.append(('LD_LIBS', os.getenv('LIBS'), False))
self.log.debug("List of replacements to perform: %s" % repls)
# patch make.sys file
fn = os.path.join(self.cfg['start_dir'], 'make.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
for (k, v, keep) in repls:
# need to use [ \t]* instead of \s*, because vars may be undefined as empty,
# and we don't want to include newlines
if keep:
line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line)
else:
line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line)
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub(r"\$\(MPIF90\) \$\(F90FLAGS\) -c \$<",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90.sys')
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
if len(wantdirs) != 0:
wantdir = os.path.join(self.builddir, wantdirs[0])
make_sys_in_path = None
cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
for path in cand_paths:
full_path = os.path.join(wantdir, path)
if os.path.exists(full_path):
make_sys_in_path = full_path
break
if make_sys_in_path is None:
raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
wantdir, ', '.join(cand_paths))
try:
for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'):
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("@f90rule@",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
# move non-espresso directories to where they're expected and create symlinks
try:
dirnames = [d for d in os.listdir(self.builddir) if not d.startswith('espresso')]
targetdir = os.path.join(self.builddir, "espresso-%s" % self.version)
for dirname in dirnames:
shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
self.log.info("Moved %s into %s" % (dirname, targetdir))
dirname_head = dirname.split('-')[0]
linkname = None
if dirname_head == 'sax':
linkname = 'SaX'
if dirname_head == 'wannier90':
linkname = 'W90'
elif dirname_head in ['gipaw', 'plumed', 'want', 'yambo']:
linkname = dirname_head.upper()
if linkname:
os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
except OSError, err:
raise EasyBuildError("Failed to move non-espresso directories: %s", err)
def install_step(self):
"""Skip install step, since we're building in the install directory."""
pass
def sanity_check_step(self):
"""Custom sanity check for Quantum ESPRESSO."""
# build list of expected binaries based on make targets
bins = ["iotk", "iotk.x", "iotk_print_kinds.x"]
if 'cp' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["cp.x", "cppp.x", "wfdd.x"])
if 'gww' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"])
if 'ld1' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["ld1.x"])
if 'gipaw' in self.cfg['buildopts']:
bins.extend(["gipaw.x"])
if 'neb' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["neb.x", "path_interpolation.x"])
if 'ph' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["d3.x", "dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["fqha.x", "q2qstar.x"])
if 'pp' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x",
"plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x",
"projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw_export.x", "pw2gw.x",
"wannier_ham.x", "wannier_plot.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["pw2bgw.x", "bgw2pw.x"])
else:
bins.extend(["pw2casino.x"])
if 'pw' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["generate_vdW_kernel_table.x"])
else:
bins.extend(["path_int.x"])
if LooseVersion(self.version) < LooseVersion("5.3.0"):
bins.extend(["band_plot.x", "bands_FS.x", "kvecs_FS.x"])
if 'pwcond' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["pwcond.x"])
if 'tddfpt' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"])
upftools = []
if 'upf' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x",
"oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x",
"virtual.x"]
if LooseVersion(self.version) > LooseVersion("5"):
upftools.extend(["interpolate.x", "upf2casino.x"])
if 'vdw' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["vdw.x"])
if 'w90' in self.cfg['buildopts']:
bins.extend(["wannier90.x"])
want_bins = []
if 'want' in self.cfg['buildopts']:
want_bins = ["bands.x", "blc2wan.x", "conductor.x", "current.x", "disentangle.x",
"dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos",
"wannier.x", "wfk2etsf.x"]
if LooseVersion(self.version) > LooseVersion("5"):
want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"])
if 'xspectra' in self.cfg['buildopts']:
bins.extend(["xspectra.x"])
yambo_bins = []
if 'yambo' in self.cfg['buildopts']:
yambo_bins = ["a2y", "p2y", "yambo", "ypp"]
pref = self.install_subdir
custom_paths = {
'files': [os.path.join(pref, 'bin', x) for x in bins] +
[os.path.join(pref, 'upftools', x) for x in upftools] +
[os.path.join(pref, 'WANT', 'bin', x) for x in want_bins] +
[os.path.join(pref, 'YAMBO', 'bin', x) for x in yambo_bins],
'dirs': [os.path.join(pref, 'include')]
}
super(EB_QuantumESPRESSO, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom path suggestions for Quantum ESPRESSO."""
guesses = super(EB_QuantumESPRESSO, self).make_module_req_guess()
# order matters here, 'bin' should be *last* in this list to ensure it gets prepended to $PATH last,
# so it gets preference over the others
# this is important since some binaries are available in two places (e.g. dos.x in both bin and WANT/bin)
bindirs = ['upftools', 'WANT/bin', 'YAMBO/bin', 'bin']
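        # illustrative: with an install_subdir like 'espresso-5.0.2' this yields PATH guesses
        # espresso-5.0.2/upftools, .../WANT/bin, .../YAMBO/bin and, last of all, espresso-5.0.2/bin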
guesses.update({
'PATH': [os.path.join(self.install_subdir, bindir) for bindir in bindirs],
'CPATH': [os.path.join(self.install_subdir, 'include')],
})
return guesses
|
ocaisa/easybuild-easyblocks
|
easybuild/easyblocks/q/quantumespresso.py
|
Python
|
gpl-2.0
| 15,880
|
[
"ESPResSo",
"Quantum ESPRESSO",
"Wannier90",
"Yambo"
] |
4e8612555791c2a64fff3f1d223e5e03a3dcbfdaab1d2b69f5714d58170b204b
|
import encrypt.scrypt as scrypt
import encrypt.aes as aes
import num.enc as enc
import hashlib
import math
def encrypt(seed, passphrase):
"""
Encrypt the Electrum seed
"""
#1. Decode the seed value to the original number
seed = mn_decode(seed.split())
#2. Take a hash of the decoded seed to act as a scrypt salt
salt = hashlib.sha256(hashlib.sha256(seed).digest()).digest()[:4]
#3. Derive a key from the passphrase using scrypt
key = scrypt.hash(passphrase, salt, 16384, 8, 8)
#4. Split the key into half 1 and half 2
derivedhalf1 = key[0:32]
derivedhalf2 = key[32:64]
#5. Do AES256Encrypt(seedhalf1 xor derivedhalf1[0...15], derivedhalf2), call the 16-byte result encryptedhalf1
    # (Electrum may change the number of words in a seed so we should future proof by just using the halves rather than hardcoded lengths)
Aes = aes.Aes(derivedhalf2)
encryptedhalf1 = Aes.enc(enc.sxor(seed[:int(math.floor(len(seed)/2))], derivedhalf1[:16]))
#6. Do AES256Encrypt(seedhalf2 xor derivedhalf1[16...31], derivedhalf2), call the 16-byte result encryptedhalf2
encryptedhalf2 = Aes.enc(enc.sxor(seed[int(math.floor(len(seed)/2)):len(seed)], derivedhalf1[16:32]))
#7. The encrypted private key is the Base58Check-encoded concatenation of the following
# \x4E\xE3\x13\x35 + salt + encryptedhalf1 + encryptedhalf2
    # (\x4E\xE3\x13\x35 gives the 'SeedE' prefix)
encSeed = '\x4E\xE3\x13\x35' + salt + encryptedhalf1 + encryptedhalf2
check = hashlib.sha256(hashlib.sha256(encSeed).digest()).digest()[:4]
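    # sizes, per the step comments above: 4-byte prefix + 4-byte salt + two 16-byte
    # encrypted halves = 40 bytes, plus this 4-byte check = 44 bytes fed to Base58 below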
return enc.b58encode(encSeed + check)
def decrypt(encSeed, passphrase):
"""
Decrypt an Electrum seed encrypted with the above method
"""
    #1. Base58-decode the encrypted seed
# get the two encrypted halves, the check and the salt
decSeed = enc.b58decode(encSeed)
check = decSeed[-4:]
    #check that it hasn't been tampered with
if check != hashlib.sha256(hashlib.sha256(decSeed[:-4]).digest()).digest()[:4]:
return False
salt = decSeed[4:8]
encryptedhalfs = decSeed[8:len(decSeed)-4]
encryptedhalf1 = encryptedhalfs[0:int(math.floor(len(encryptedhalfs)/2))]
encryptedhalf2 = encryptedhalfs[int(math.floor(len(encryptedhalfs)/2)):]
#2. Derive the decryption key using scrypt
key = scrypt.hash(passphrase, salt, 16384, 8, 8)
derivedhalf1 = key[0:32]
derivedhalf2 = key[32:64]
#3. Decrypt the encrypted halves
Aes = aes.Aes(derivedhalf2)
decryptedhalf1 = Aes.dec(encryptedhalf1)
decryptedhalf2 = Aes.dec(encryptedhalf2)
    #4. xor them with the two halves of derivedhalf1 to get the original values
half1 = enc.sxor(decryptedhalf1, derivedhalf1[:16])
half2 = enc.sxor(decryptedhalf2, derivedhalf1[16:32])
#5. build the seed and check it against the check hash
seed = half1 + half2
if salt != hashlib.sha256(hashlib.sha256(seed).digest()).digest()[:4]:
return False
#6. encode the seed as an Electrum Mnemonic list
mn = mn_encode(str(seed))
    #7. return the mnemonic as a single string
seed = ''
for word in mn:
seed += word + ' '
return seed
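# Usage sketch (not part of the original flow): round-trip a mnemonic through
# encrypt()/decrypt(). The mnemonic and passphrase are illustrative values, and this
# assumes the encrypt.scrypt / encrypt.aes / num.enc helpers behave as the step
# comments above describe (scrypt KDF, 16-byte AES blocks, Base58Check encoding).
def _example_roundtrip():
    mnemonic = "like just love know never want time out there make look eye"
    passphrase = "example passphrase"  # hypothetical passphrase
    enc_seed = encrypt(mnemonic, passphrase)
    plain = decrypt(enc_seed, passphrase)
    # decrypt() returns False on a checksum mismatch, otherwise the mnemonic words
    # joined by (and ending with) a space, hence the strip() before comparing
    assert plain is not False and plain.strip() == mnemonic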
def buildRandom():
"""
Generate a 12 word mnemonic for unit tests
"""
import random
outWords = ''
for i in xrange(0,12):
word = words[random.randint(0,(len(words)-1))]
outWords += word + ' '
return outWords
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry
words = [
"like",
"just",
"love",
"know",
"never",
"want",
"time",
"out",
"there",
"make",
"look",
"eye",
"down",
"only",
"think",
"heart",
"back",
"then",
"into",
"about",
"more",
"away",
"still",
"them",
"take",
"thing",
"even",
"through",
"long",
"always",
"world",
"too",
"friend",
"tell",
"try",
"hand",
"thought",
"over",
"here",
"other",
"need",
"smile",
"again",
"much",
"cry",
"been",
"night",
"ever",
"little",
"said",
"end",
"some",
"those",
"around",
"mind",
"people",
"girl",
"leave",
"dream",
"left",
"turn",
"myself",
"give",
"nothing",
"really",
"off",
"before",
"something",
"find",
"walk",
"wish",
"good",
"once",
"place",
"ask",
"stop",
"keep",
"watch",
"seem",
"everything",
"wait",
"got",
"yet",
"made",
"remember",
"start",
"alone",
"run",
"hope",
"maybe",
"believe",
"body",
"hate",
"after",
"close",
"talk",
"stand",
"own",
"each",
"hurt",
"help",
"home",
"god",
"soul",
"new",
"many",
"two",
"inside",
"should",
"true",
"first",
"fear",
"mean",
"better",
"play",
"another",
"gone",
"change",
"use",
"wonder",
"someone",
"hair",
"cold",
"open",
"best",
"any",
"behind",
"happen",
"water",
"dark",
"laugh",
"stay",
"forever",
"name",
"work",
"show",
"sky",
"break",
"came",
"deep",
"door",
"put",
"black",
"together",
"upon",
"happy",
"such",
"great",
"white",
"matter",
"fill",
"past",
"please",
"burn",
"cause",
"enough",
"touch",
"moment",
"soon",
"voice",
"scream",
"anything",
"stare",
"sound",
"red",
"everyone",
"hide",
"kiss",
"truth",
"death",
"beautiful",
"mine",
"blood",
"broken",
"very",
"pass",
"next",
"forget",
"tree",
"wrong",
"air",
"mother",
"understand",
"lip",
"hit",
"wall",
"memory",
"sleep",
"free",
"high",
"realize",
"school",
"might",
"skin",
"sweet",
"perfect",
"blue",
"kill",
"breath",
"dance",
"against",
"fly",
"between",
"grow",
"strong",
"under",
"listen",
"bring",
"sometimes",
"speak",
"pull",
"person",
"become",
"family",
"begin",
"ground",
"real",
"small",
"father",
"sure",
"feet",
"rest",
"young",
"finally",
"land",
"across",
"today",
"different",
"guy",
"line",
"fire",
"reason",
"reach",
"second",
"slowly",
"write",
"eat",
"smell",
"mouth",
"step",
"learn",
"three",
"floor",
"promise",
"breathe",
"darkness",
"push",
"earth",
"guess",
"save",
"song",
"above",
"along",
"both",
"color",
"house",
"almost",
"sorry",
"anymore",
"brother",
"okay",
"dear",
"game",
"fade",
"already",
"apart",
"warm",
"beauty",
"heard",
"notice",
"question",
"shine",
"began",
"piece",
"whole",
"shadow",
"secret",
"street",
"within",
"finger",
"point",
"morning",
"whisper",
"child",
"moon",
"green",
"story",
"glass",
"kid",
"silence",
"since",
"soft",
"yourself",
"empty",
"shall",
"angel",
"answer",
"baby",
"bright",
"dad",
"path",
"worry",
"hour",
"drop",
"follow",
"power",
"war",
"half",
"flow",
"heaven",
"act",
"chance",
"fact",
"least",
"tired",
"children",
"near",
"quite",
"afraid",
"rise",
"sea",
"taste",
"window",
"cover",
"nice",
"trust",
"lot",
"sad",
"cool",
"force",
"peace",
"return",
"blind",
"easy",
"ready",
"roll",
"rose",
"drive",
"held",
"music",
"beneath",
"hang",
"mom",
"paint",
"emotion",
"quiet",
"clear",
"cloud",
"few",
"pretty",
"bird",
"outside",
"paper",
"picture",
"front",
"rock",
"simple",
"anyone",
"meant",
"reality",
"road",
"sense",
"waste",
"bit",
"leaf",
"thank",
"happiness",
"meet",
"men",
"smoke",
"truly",
"decide",
"self",
"age",
"book",
"form",
"alive",
"carry",
"escape",
"damn",
"instead",
"able",
"ice",
"minute",
"throw",
"catch",
"leg",
"ring",
"course",
"goodbye",
"lead",
"poem",
"sick",
"corner",
"desire",
"known",
"problem",
"remind",
"shoulder",
"suppose",
"toward",
"wave",
"drink",
"jump",
"woman",
"pretend",
"sister",
"week",
"human",
"joy",
"crack",
"grey",
"pray",
"surprise",
"dry",
"knee",
"less",
"search",
"bleed",
"caught",
"clean",
"embrace",
"future",
"king",
"son",
"sorrow",
"chest",
"hug",
"remain",
"sat",
"worth",
"blow",
"daddy",
"final",
"parent",
"tight",
"also",
"create",
"lonely",
"safe",
"cross",
"dress",
"evil",
"silent",
"bone",
"fate",
"perhaps",
"anger",
"class",
"scar",
"snow",
"tiny",
"tonight",
"continue",
"control",
"dog",
"edge",
"mirror",
"month",
"suddenly",
"comfort",
"given",
"loud",
"quickly",
"gaze",
"plan",
"rush",
"stone",
"town",
"battle",
"ignore",
"spirit",
"stood",
"stupid",
"yours",
"brown",
"build",
"dust",
"hey",
"kept",
"pay",
"phone",
"twist",
"although",
"ball",
"beyond",
"hidden",
"nose",
"taken",
"fail",
"float",
"pure",
"somehow",
"wash",
"wrap",
"angry",
"cheek",
"creature",
"forgotten",
"heat",
"rip",
"single",
"space",
"special",
"weak",
"whatever",
"yell",
"anyway",
"blame",
"job",
"choose",
"country",
"curse",
"drift",
"echo",
"figure",
"grew",
"laughter",
"neck",
"suffer",
"worse",
"yeah",
"disappear",
"foot",
"forward",
"knife",
"mess",
"somewhere",
"stomach",
"storm",
"beg",
"idea",
"lift",
"offer",
"breeze",
"field",
"five",
"often",
"simply",
"stuck",
"win",
"allow",
"confuse",
"enjoy",
"except",
"flower",
"seek",
"strength",
"calm",
"grin",
"gun",
"heavy",
"hill",
"large",
"ocean",
"shoe",
"sigh",
"straight",
"summer",
"tongue",
"accept",
"crazy",
"everyday",
"exist",
"grass",
"mistake",
"sent",
"shut",
"surround",
"table",
"ache",
"brain",
"destroy",
"heal",
"nature",
"shout",
"sign",
"stain",
"choice",
"doubt",
"glance",
"glow",
"mountain",
"queen",
"stranger",
"throat",
"tomorrow",
"city",
"either",
"fish",
"flame",
"rather",
"shape",
"spin",
"spread",
"ash",
"distance",
"finish",
"image",
"imagine",
"important",
"nobody",
"shatter",
"warmth",
"became",
"feed",
"flesh",
"funny",
"lust",
"shirt",
"trouble",
"yellow",
"attention",
"bare",
"bite",
"money",
"protect",
"amaze",
"appear",
"born",
"choke",
"completely",
"daughter",
"fresh",
"friendship",
"gentle",
"probably",
"six",
"deserve",
"expect",
"grab",
"middle",
"nightmare",
"river",
"thousand",
"weight",
"worst",
"wound",
"barely",
"bottle",
"cream",
"regret",
"relationship",
"stick",
"test",
"crush",
"endless",
"fault",
"itself",
"rule",
"spill",
"art",
"circle",
"join",
"kick",
"mask",
"master",
"passion",
"quick",
"raise",
"smooth",
"unless",
"wander",
"actually",
"broke",
"chair",
"deal",
"favorite",
"gift",
"note",
"number",
"sweat",
"box",
"chill",
"clothes",
"lady",
"mark",
"park",
"poor",
"sadness",
"tie",
"animal",
"belong",
"brush",
"consume",
"dawn",
"forest",
"innocent",
"pen",
"pride",
"stream",
"thick",
"clay",
"complete",
"count",
"draw",
"faith",
"press",
"silver",
"struggle",
"surface",
"taught",
"teach",
"wet",
"bless",
"chase",
"climb",
"enter",
"letter",
"melt",
"metal",
"movie",
"stretch",
"swing",
"vision",
"wife",
"beside",
"crash",
"forgot",
"guide",
"haunt",
"joke",
"knock",
"plant",
"pour",
"prove",
"reveal",
"steal",
"stuff",
"trip",
"wood",
"wrist",
"bother",
"bottom",
"crawl",
"crowd",
"fix",
"forgive",
"frown",
"grace",
"loose",
"lucky",
"party",
"release",
"surely",
"survive",
"teacher",
"gently",
"grip",
"speed",
"suicide",
"travel",
"treat",
"vein",
"written",
"cage",
"chain",
"conversation",
"date",
"enemy",
"however",
"interest",
"million",
"page",
"pink",
"proud",
"sway",
"themselves",
"winter",
"church",
"cruel",
"cup",
"demon",
"experience",
"freedom",
"pair",
"pop",
"purpose",
"respect",
"shoot",
"softly",
"state",
"strange",
"bar",
"birth",
"curl",
"dirt",
"excuse",
"lord",
"lovely",
"monster",
"order",
"pack",
"pants",
"pool",
"scene",
"seven",
"shame",
"slide",
"ugly",
"among",
"blade",
"blonde",
"closet",
"creek",
"deny",
"drug",
"eternity",
"gain",
"grade",
"handle",
"key",
"linger",
"pale",
"prepare",
"swallow",
"swim",
"tremble",
"wheel",
"won",
"cast",
"cigarette",
"claim",
"college",
"direction",
"dirty",
"gather",
"ghost",
"hundred",
"loss",
"lung",
"orange",
"present",
"swear",
"swirl",
"twice",
"wild",
"bitter",
"blanket",
"doctor",
"everywhere",
"flash",
"grown",
"knowledge",
"numb",
"pressure",
"radio",
"repeat",
"ruin",
"spend",
"unknown",
"buy",
"clock",
"devil",
"early",
"false",
"fantasy",
"pound",
"precious",
"refuse",
"sheet",
"teeth",
"welcome",
"add",
"ahead",
"block",
"bury",
"caress",
"content",
"depth",
"despite",
"distant",
"marry",
"purple",
"threw",
"whenever",
"bomb",
"dull",
"easily",
"grasp",
"hospital",
"innocence",
"normal",
"receive",
"reply",
"rhyme",
"shade",
"someday",
"sword",
"toe",
"visit",
"asleep",
"bought",
"center",
"consider",
"flat",
"hero",
"history",
"ink",
"insane",
"muscle",
"mystery",
"pocket",
"reflection",
"shove",
"silently",
"smart",
"soldier",
"spot",
"stress",
"train",
"type",
"view",
"whether",
"bus",
"energy",
"explain",
"holy",
"hunger",
"inch",
"magic",
"mix",
"noise",
"nowhere",
"prayer",
"presence",
"shock",
"snap",
"spider",
"study",
"thunder",
"trail",
"admit",
"agree",
"bag",
"bang",
"bound",
"butterfly",
"cute",
"exactly",
"explode",
"familiar",
"fold",
"further",
"pierce",
"reflect",
"scent",
"selfish",
"sharp",
"sink",
"spring",
"stumble",
"universe",
"weep",
"women",
"wonderful",
"action",
"ancient",
"attempt",
"avoid",
"birthday",
"branch",
"chocolate",
"core",
"depress",
"drunk",
"especially",
"focus",
"fruit",
"honest",
"match",
"palm",
"perfectly",
"pillow",
"pity",
"poison",
"roar",
"shift",
"slightly",
"thump",
"truck",
"tune",
"twenty",
"unable",
"wipe",
"wrote",
"coat",
"constant",
"dinner",
"drove",
"egg",
"eternal",
"flight",
"flood",
"frame",
"freak",
"gasp",
"glad",
"hollow",
"motion",
"peer",
"plastic",
"root",
"screen",
"season",
"sting",
"strike",
"team",
"unlike",
"victim",
"volume",
"warn",
"weird",
"attack",
"await",
"awake",
"built",
"charm",
"crave",
"despair",
"fought",
"grant",
"grief",
"horse",
"limit",
"message",
"ripple",
"sanity",
"scatter",
"serve",
"split",
"string",
"trick",
"annoy",
"blur",
"boat",
"brave",
"clearly",
"cling",
"connect",
"fist",
"forth",
"imagination",
"iron",
"jock",
"judge",
"lesson",
"milk",
"misery",
"nail",
"naked",
"ourselves",
"poet",
"possible",
"princess",
"sail",
"size",
"snake",
"society",
"stroke",
"torture",
"toss",
"trace",
"wise",
"bloom",
"bullet",
"cell",
"check",
"cost",
"darling",
"during",
"footstep",
"fragile",
"hallway",
"hardly",
"horizon",
"invisible",
"journey",
"midnight",
"mud",
"nod",
"pause",
"relax",
"shiver",
"sudden",
"value",
"youth",
"abuse",
"admire",
"blink",
"breast",
"bruise",
"constantly",
"couple",
"creep",
"curve",
"difference",
"dumb",
"emptiness",
"gotta",
"honor",
"plain",
"planet",
"recall",
"rub",
"ship",
"slam",
"soar",
"somebody",
"tightly",
"weather",
"adore",
"approach",
"bond",
"bread",
"burst",
"candle",
"coffee",
"cousin",
"crime",
"desert",
"flutter",
"frozen",
"grand",
"heel",
"hello",
"language",
"level",
"movement",
"pleasure",
"powerful",
"random",
"rhythm",
"settle",
"silly",
"slap",
"sort",
"spoken",
"steel",
"threaten",
"tumble",
"upset",
"aside",
"awkward",
"bee",
"blank",
"board",
"button",
"card",
"carefully",
"complain",
"crap",
"deeply",
"discover",
"drag",
"dread",
"effort",
"entire",
"fairy",
"giant",
"gotten",
"greet",
"illusion",
"jeans",
"leap",
"liquid",
"march",
"mend",
"nervous",
"nine",
"replace",
"rope",
"spine",
"stole",
"terror",
"accident",
"apple",
"balance",
"boom",
"childhood",
"collect",
"demand",
"depression",
"eventually",
"faint",
"glare",
"goal",
"group",
"honey",
"kitchen",
"laid",
"limb",
"machine",
"mere",
"mold",
"murder",
"nerve",
"painful",
"poetry",
"prince",
"rabbit",
"shelter",
"shore",
"shower",
"soothe",
"stair",
"steady",
"sunlight",
"tangle",
"tease",
"treasure",
"uncle",
"begun",
"bliss",
"canvas",
"cheer",
"claw",
"clutch",
"commit",
"crimson",
"crystal",
"delight",
"doll",
"existence",
"express",
"fog",
"football",
"gay",
"goose",
"guard",
"hatred",
"illuminate",
"mass",
"math",
"mourn",
"rich",
"rough",
"skip",
"stir",
"student",
"style",
"support",
"thorn",
"tough",
"yard",
"yearn",
"yesterday",
"advice",
"appreciate",
"autumn",
"bank",
"beam",
"bowl",
"capture",
"carve",
"collapse",
"confusion",
"creation",
"dove",
"feather",
"girlfriend",
"glory",
"government",
"harsh",
"hop",
"inner",
"loser",
"moonlight",
"neighbor",
"neither",
"peach",
"pig",
"praise",
"screw",
"shield",
"shimmer",
"sneak",
"stab",
"subject",
"throughout",
"thrown",
"tower",
"twirl",
"wow",
"army",
"arrive",
"bathroom",
"bump",
"cease",
"cookie",
"couch",
"courage",
"dim",
"guilt",
"howl",
"hum",
"husband",
"insult",
"led",
"lunch",
"mock",
"mostly",
"natural",
"nearly",
"needle",
"nerd",
"peaceful",
"perfection",
"pile",
"price",
"remove",
"roam",
"sanctuary",
"serious",
"shiny",
"shook",
"sob",
"stolen",
"tap",
"vain",
"void",
"warrior",
"wrinkle",
"affection",
"apologize",
"blossom",
"bounce",
"bridge",
"cheap",
"crumble",
"decision",
"descend",
"desperately",
"dig",
"dot",
"flip",
"frighten",
"heartbeat",
"huge",
"lazy",
"lick",
"odd",
"opinion",
"process",
"puzzle",
"quietly",
"retreat",
"score",
"sentence",
"separate",
"situation",
"skill",
"soak",
"square",
"stray",
"taint",
"task",
"tide",
"underneath",
"veil",
"whistle",
"anywhere",
"bedroom",
"bid",
"bloody",
"burden",
"careful",
"compare",
"concern",
"curtain",
"decay",
"defeat",
"describe",
"double",
"dreamer",
"driver",
"dwell",
"evening",
"flare",
"flicker",
"grandma",
"guitar",
"harm",
"horrible",
"hungry",
"indeed",
"lace",
"melody",
"monkey",
"nation",
"object",
"obviously",
"rainbow",
"salt",
"scratch",
"shown",
"shy",
"stage",
"stun",
"third",
"tickle",
"useless",
"weakness",
"worship",
"worthless",
"afternoon",
"beard",
"boyfriend",
"bubble",
"busy",
"certain",
"chin",
"concrete",
"desk",
"diamond",
"doom",
"drawn",
"due",
"felicity",
"freeze",
"frost",
"garden",
"glide",
"harmony",
"hopefully",
"hunt",
"jealous",
"lightning",
"mama",
"mercy",
"peel",
"physical",
"position",
"pulse",
"punch",
"quit",
"rant",
"respond",
"salty",
"sane",
"satisfy",
"savior",
"sheep",
"slept",
"social",
"sport",
"tuck",
"utter",
"valley",
"wolf",
"aim",
"alas",
"alter",
"arrow",
"awaken",
"beaten",
"belief",
"brand",
"ceiling",
"cheese",
"clue",
"confidence",
"connection",
"daily",
"disguise",
"eager",
"erase",
"essence",
"everytime",
"expression",
"fan",
"flag",
"flirt",
"foul",
"fur",
"giggle",
"glorious",
"ignorance",
"law",
"lifeless",
"measure",
"mighty",
"muse",
"north",
"opposite",
"paradise",
"patience",
"patient",
"pencil",
"petal",
"plate",
"ponder",
"possibly",
"practice",
"slice",
"spell",
"stock",
"strife",
"strip",
"suffocate",
"suit",
"tender",
"tool",
"trade",
"velvet",
"verse",
"waist",
"witch",
"aunt",
"bench",
"bold",
"cap",
"certainly",
"click",
"companion",
"creator",
"dart",
"delicate",
"determine",
"dish",
"dragon",
"drama",
"drum",
"dude",
"everybody",
"feast",
"forehead",
"former",
"fright",
"fully",
"gas",
"hook",
"hurl",
"invite",
"juice",
"manage",
"moral",
"possess",
"raw",
"rebel",
"royal",
"scale",
"scary",
"several",
"slight",
"stubborn",
"swell",
"talent",
"tea",
"terrible",
"thread",
"torment",
"trickle",
"usually",
"vast",
"violence",
"weave",
"acid",
"agony",
"ashamed",
"awe",
"belly",
"blend",
"blush",
"character",
"cheat",
"common",
"company",
"coward",
"creak",
"danger",
"deadly",
"defense",
"define",
"depend",
"desperate",
"destination",
"dew",
"duck",
"dusty",
"embarrass",
"engine",
"example",
"explore",
"foe",
"freely",
"frustrate",
"generation",
"glove",
"guilty",
"health",
"hurry",
"idiot",
"impossible",
"inhale",
"jaw",
"kingdom",
"mention",
"mist",
"moan",
"mumble",
"mutter",
"observe",
"ode",
"pathetic",
"pattern",
"pie",
"prefer",
"puff",
"rape",
"rare",
"revenge",
"rude",
"scrape",
"spiral",
"squeeze",
"strain",
"sunset",
"suspend",
"sympathy",
"thigh",
"throne",
"total",
"unseen",
"weapon",
"weary"
]
n = 1626
# Note about US patent no 5892470: Here each word does not represent a given digit.
# Instead, the digit represented by a word is variable, it depends on the previous word.
def mn_encode( message ):
assert len(message) % 8 == 0
out = []
for i in range(len(message)/8):
word = message[8*i:8*i+8]
x = int(word, 16)
w1 = (x%n)
w2 = ((x/n) + w1)%n
w3 = ((x/n/n) + w2)%n
out += [ words[w1], words[w2], words[w3] ]
return out
def mn_decode( wlist ):
out = ''
for i in range(len(wlist)/3):
word1, word2, word3 = wlist[3*i:3*i+3]
w1 = words.index(word1)
w2 = (words.index(word2))%n
w3 = (words.index(word3))%n
x = w1 +n*((w2-w1)%n) +n*n*((w3-w2)%n)
out += '%08x'%x
return out
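# Worked sketch of the scheme above: for one 32-bit chunk x, mn_encode() emits indices
#     w1 = x % n
#     w2 = (x/n + w1) % n
#     w3 = (x/n/n + w2) % n
# (Python 2 integer division), and mn_decode() inverts them with
#     x = w1 + n*((w2 - w1) % n) + n*n*((w3 - w2) % n)
# so the digit carried by each word is an offset from the previous word, as noted above.
def _example_mn_roundtrip():
    msg = "0000000100000002"  # hypothetical 16-hex-digit message, i.e. two chunks / six words
    assert mn_decode(mn_encode(msg)) == msg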
|
inuitwallet/bippy_old
|
encrypt/electrum.py
|
Python
|
mit
| 20,149
|
[
"CRYSTAL",
"VisIt"
] |
f8f785b10e582287b523dad34bafdb4fe6343d22b0ae9fd91edf4eb4a86c0c0b
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import inspect
# Import the relevant PTS classes and modules
from pts.core.tools import filesystem as fs
from pts.core.units.parsing import parse_unit as u
from pts.core.basics.configuration import Configuration
from pts.core.simulation.skifile import SkiFile
from pts.core.basics.range import QuantityRange, RealRange
from pts.core.basics.map import Map
from pts.do.commandline import Command
from pts.core.test.implementation import TestImplementation
# -----------------------------------------------------------------
this_path = fs.absolute_path(inspect.stack()[0][1])
this_dir_path = fs.directory_of(this_path)
# -----------------------------------------------------------------
description = "determining parameters based on mock observations of a simple spiral galaxy model"
# -----------------------------------------------------------------
# Determine the ski path
ski_path = fs.join(this_dir_path, "spiral.ski")
# Get the initial dust mass of the exponential disk with spiral structure
ski = SkiFile(ski_path)
dust_mass = ski.get_labeled_value("exp_dustmass")
# -----------------------------------------------------------------
class SpiralTest(TestImplementation):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(SpiralTest, self).__init__(*args, **kwargs)
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
pass
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
super(SpiralTest, self).setup(**kwargs)
# -----------------------------------------------------------------
def launch_reference(self):
"""
This function ...
:return:
"""
# Determine the simulation output path
        self.simulation_output_path = "./ref"  # stored on self so create_sed() can reuse it
# Settings
settings_launch = dict()
settings_launch["ski"] = ski_path
        settings_launch["output"] = self.simulation_output_path
settings_launch["create_output"] = True
# Input
input_launch = dict()
# Construct the command
launch = Command("launch_simulation", "launch the reference simulation", settings_launch, input_launch, cwd=".")
# Add the command
#commands.append(launch)
launcher = self.run_command(launch)
# -----------------------------------------------------------------
def create_sed(self):
"""
This function ...
:return:
"""
# Create simulation
#prefix = name = "spiral"
        output_path = self.simulation_output_path
#simulation = SkirtSimulation(prefix, outpath=output_path, ski_path=ski_path, name=name)
# Settings
settings_sed = dict()
settings_sed["spectral_convolution"] = False
# Input
input_sed = dict()
        input_sed["simulation_output_path"] = self.simulation_output_path
input_sed["output_path"] = "."
# Construct the command
create_sed = Command("observed_fluxes", "create the mock SED", settings_sed, input_sed, cwd=".")
# Add the command
#commands.append(create_sed)
calculator = self.run_command(create_sed)
# Determine the path to the mock SED
        self.mock_sed_path = "spiral_earth_fluxes.dat"  # referenced again in setup_modelling()
# -----------------------------------------------------------------
def setup_modelling(self):
"""
This function ...
:return:
"""
# -----------------------------------------------------------------
# SETUP THE MODELLING
# -----------------------------------------------------------------
# Settings
settings_setup = dict()
settings_setup["type"] = "sed"
settings_setup["name"] = "Spiral"
settings_setup["fitting_host_ids"] = None
# Create object config
object_config = dict()
object_config["ski"] = ski_path
# Create input dict for setup
input_setup = dict()
input_setup["object_config"] = object_config
        input_setup["sed"] = self.mock_sed_path
# Construct the command
stp = Command("setup", "setup the modeling", settings_setup, input_setup, cwd=".")
# Add the command
        #commands.append(stp)
        setupper = self.run_command(stp)
# -----------------------------------------------------------------
def model(self):
"""
This function ...
:return:
"""
# Settings
settings_model = dict()
settings_model["ngenerations"] = 4
settings_model["nsimulations"] = 20
settings_model["fitting_settings"] = {"spectral_convolution": False}
# Input
# Get free parameter names
ski = SkiFile(ski_path)
free_parameter_names = ski.labels
# Get fitting filter names
#filter_names = sed.filter_names()
# Set descriptions
descriptions = Map()
descriptions["exp_dustmass"] = "dust mass of the exponential disk with spiral structure"
# Set types
types = Map()
types["exp_dustmass"] = "dust mass"
# Set units
units = Map()
units["exp_dustmass"] = u("Msun")
# Set the range of the dust mass
dustmass_range = QuantityRange(0.1*dust_mass, 100*dust_mass)
# Create input dict for model
input_model = dict()
input_model["parameters_config"] = Configuration(free_parameters=free_parameter_names)
input_model["descriptions_config"] = Configuration(descriptions=descriptions)
input_model["types_config"] = Configuration(types=types)
input_model["units_config"] = Configuration(units=units)
input_model["ranges_config"] = Configuration(exp_dustmass_range=dustmass_range)
#input_model["filters_config"] = Configuration(filters=filter_names)
# Fitting initializer config
input_model["initialize_config"] = Configuration(npackages=1e4)
# Add dict of input for 'model' command to the list
#input_dicts.append(input_model)
# Construct the command
command = Command("model", "perform the modelling", settings_model, input_model, "./Spiral")
# Add the command
#commands.append(command)
modeler = self.run_command(command)
# -----------------------------------------------------------------
# TEST FUNCTION
# -----------------------------------------------------------------
def test(temp_path):
"""
This function ...
"""
return
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/tests/Spiral/test.py
|
Python
|
agpl-3.0
| 7,394
|
[
"Galaxy"
] |
6657267edcb81bd23dc3a0bc000165b464e9ad8755245ae1cbb43fce7ace0942
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import markdown as md
from django.conf import settings
from django import template
from django.utils import timezone
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
def querysort(value, arg):
"""
Sorts a query set based on a field
"""
value = value.order_by(arg)
return value
@register.filter
def markdown(value):
return md.markdown(value)
@register.filter
def firstwords(value, arg):
arg = int(arg)
return " ".join(value.split()[:arg]) + "..."
@register.filter
def timesince(value):
diff = timezone.now() - value
plural = ""
if diff.days == 0:
hours = int(diff.seconds / 3600.0)
if hours != 1:
plural = "s"
return "%d hour%s ago" % (int(diff.seconds / 3600.0), plural)
else:
if diff.days != 1:
plural = "s"
return "%d day%s ago" % (diff.days, plural)
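# Illustrative template usage for the filters above (variable names are hypothetical):
#   {{ roles|querysort:"name" }}
#   {{ role.description|markdown|safe }}
#   {{ role.description|firstwords:20 }}
#   {{ role.created|timesince }}   e.g. "3 hours ago" or "2 days ago"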
@register.filter
@stringfilter
def urlname(value):
parts = value.split('/')
paths = []
for part in parts:
if not part == '':
paths.append(part)
if len(paths) > 1:
return check_title("%s %s" % (paths[-2].title(), paths[-1].title()))
elif len(paths) > 0:
return check_title(paths[-1].title())
else:
return ''
@register.filter
def check_title(value):
if value == 'Password Change':
return 'Change Password'
elif value == 'Accounts Connect':
return 'Connect to Github'
elif value == 'Role Add':
return 'Add a Role'
elif value == 'Accounts Login':
return 'Login'
elif value == 'Accounts Landing':
return 'Success!'
elif value == 'Accounts Logout':
return 'Logout'
elif value == 'Accounts Profile':
return 'My Content'
elif value == 'Accounts Email':
return 'Manage Email'
elif value == 'Intro':
return 'About'
elif 'Confirm-Email' in value:
return 'Confirm Email'
else:
return value
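# Illustrative mapping, derived from the two filters above:
#   urlname("/accounts/profile/") -> check_title("Accounts Profile") -> "My Content"
#   urlname("/intro/")            -> check_title("Intro")            -> "About"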
@register.assignment_tag
def get_galaxy_version():
return settings.version
|
chouseknecht/galaxy
|
galaxy/main/templatetags/galaxyhelpers.py
|
Python
|
apache-2.0
| 2,772
|
[
"Galaxy"
] |
fa8f971e60a8034ef3772590ec12c9d1b88c238c8bbe79090e2aee9e9932be7b
|
# -*- coding: utf-8 -*-
""" Volunteer Management System """
#from gluon.sql import Rows
prefix = request.controller
resourcename = request.function
if prefix not in deployment_settings.modules:
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
# Options Menu (available in all Functions)
def shn_menu():
menu = [
[T("Home"), False, aURL(r=request, f="index")],
[T("Projects"), False, aURL(r=request, f="project"),[
[T("Search"), False, aURL(r=request, f="project", args="search_location")],
[T("Add Project"), False, aURL(p="create", r=request, f="project", args="create")],
]],
]
if session.rcvars and "project_project" in session.rcvars:
project_id = session.rcvars["project_project"]
selection = db.project_project[project_id]
if selection:
menu_project = [
["%s %s" % (T("Project") + ":", selection.code), False, aURL(r=request, f="project", args=[project_id]),[
[T("Tasks"), False, aURL(r=request, f="project", args=[project_id, "task"])],
# Staff cannot be a component of Project since staff may be assigned to many projects
#[T("Staff"), False, URL(r=request, f="project", args=[project_id, "staff"])],
]]
]
menu.extend(menu_project)
menu_teams = [
[T("Teams"), False, aURL(r=request, f="group"),[
[T("List"), False, aURL(r=request, f="group")],
[T("Add"), False, aURL(p="create", r=request, f="group", args="create")],
]]
]
menu.extend(menu_teams)
if session.rcvars and "pr_group" in session.rcvars:
group_id = session.rcvars["pr_group"]
selection = db.pr_group[group_id]
if selection:
team_name = shn_pr_group_represent(group_id)
menu_teams = [
["%s %s" % (T("Team") + ":", team_name), False, aURL(r=request, f="group", args=[group_id, "read"]),[
[T("View On Map"), False, aURL(r=request, f="view_team_map", args=[group_id])],
[T("Send Notification"), False, aURL(r=request, f="compose_group", vars={"group_id":group_id})],
#[T("Find Volunteers"), False, aURL(r=request, f="skillSearch")],
]],
]
menu.extend(menu_teams)
menu_persons = [
[T("Volunteers"), False, aURL(r=request, f="person", args=["search"]),[
[T("List"), False, aURL(r=request, f="person")],
[T("Add"), False, aURL(p="create", r=request, f="person", args="create")],
#[T("Find Volunteers"), False, aURL(r=request, f="skillSearch")],
]]
]
menu.extend(menu_persons)
if session.rcvars and "pr_person" in session.rcvars:
person_id = session.rcvars["pr_person"]
selection = db.pr_person[person_id]
if selection:
person_name = shn_pr_person_represent(person_id)
# ?vol_tabs=person and ?vol_tabs=volunteer are used by the person
# controller to select which set of tabs to display.
menu_person = [
["%s %s" % (T("Person") + ":", person_name), False, aURL(r=request, f="person", args=[person_id, "read"]),[
# The arg "volunteer" causes this to display the
# vol_volunteer tab initially.
[T("Volunteer Data"), False, aURL(r=request, f="person", args=[person_id, "volunteer"], vars={"vol_tabs":"volunteer"})],
# The default tab is pr_person, which is fine here.
[T("Person Data"), False, aURL(r=request, f="person", args=[person_id], vars={"vol_tabs":"person"})],
[T("View On Map"), False, aURL(r=request, f="view_map", args=[person_id])],
[T("Send Notification"), False, URL(r=request, f="compose_person", vars={"person_id":person_id})],
]],
]
menu.extend(menu_person)
menu_skills = [
[T("Skills"), False, aURL(r=request, f="skill")],
]
menu.extend(menu_skills)
if auth.user is not None:
menu_user = [
[T("My Tasks"), False, aURL(r=request, f="task", args="")],
]
menu.extend(menu_user)
response.menu_options = menu
shn_menu()
def index():
""" Module's Home Page """
# Module's nice name
try:
module_name = deployment_settings.modules[prefix].name_nice
except:
module_name = T("Volunteer Management")
# Override prefix and resourcename
_prefix = "pr"
resourcename = "person"
# Choose table
tablename = "%s_%s" % (_prefix, resourcename)
table = db[tablename]
# Configure redirection and list fields
register_url = str(URL(r=request, f=resourcename,
args=["[id]", "volunteer"],
vars={"vol_tabs":1}))
s3xrc.model.configure(table,
create_next=register_url,
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"gender",
"occupation"])
# Pre-process
def prep(r):
""" Redirect to search/person view """
if r.representation == "html":
if not r.id:
r.method = "search"
else:
redirect(URL(r=request, f=resourcename, args=[r.id]))
return True
# Post-process
def postp(r, output):
""" Custom action buttons """
response.s3.actions = []
# Button labels
REGISTER = str(T("Register"))
DETAILS = str(T("Details"))
if not r.component:
open_button_label = DETAILS
if auth.s3_logged_in():
# Set action buttons
response.s3.actions = [
dict(label=REGISTER, _class="action-btn", url=register_url)
]
else:
open_button_label = UPDATE
# Always have an Open-button
linkto = r.resource.crud._linkto(r, update=True)("[id]")
response.s3.actions.append(dict(label=open_button_label,
_class="action-btn", url=linkto))
return output
# Set hooks
response.s3.prep = prep
response.s3.postp = postp
if auth.s3_logged_in():
add_btn = A(T("Add Person"),
_class="action-btn",
_href=URL(r=request, f="person", args="create"))
else:
add_btn = None
    # REST controller
output = s3_rest_controller(_prefix, resourcename,
module_name=module_name,
add_btn=add_btn)
# Set view, update menu and return output
response.view = "vol/index.html"
response.title = module_name
shn_menu()
return output
# -----------------------------------------------------------------------------
# People
# -----------------------------------------------------------------------------
def register():
"""
Custom page to allow people to register as a Volunteer whilst hiding the complexity of the data model.
"""
# Fields that we want in our custom Form
# Dicts of tablename/fieldname
fields = [
{
"tablename" : "pr_person",
"fieldname" : "first_name",
"required" : True
},
{
"tablename" : "pr_person",
"fieldname" : "last_name"
},
{
"tablename" : "pr_pe_contact",
"fieldname" : "value",
"formfieldname" : "telephone",
"label" : T("Telephone"),
"comment" : DIV(_class="tooltip",
_title="%s|%s" % (T("Telephone"),
T("Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.")))
},
{
"tablename" : "pr_pe_contact",
"fieldname" : "value",
"formfieldname" : "email",
"label" : T("Email Address"),
"required" : True
},
{
"tablename" : "vol_credential",
"fieldname" : "skill_id",
#"label":T("My Current function")
},
{
"tablename" : "vol_volunteer",
"fieldname" : "location_id",
#"label" : T("I am available in the following area(s)"),
},
]
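    # To expose another field in the sign-up form, append a dict of the same shape, e.g.
    # {"tablename": "pr_person", "fieldname": "gender", "label": T("Gender")} (illustrative;
    # the tablename must be one of the tables given a SQLFORM in `forms` below).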
# Forms
forms = Storage()
forms["pr_person"] = SQLFORM(db.pr_person)
forms["pr_pe_contact1"] = SQLFORM(db.pr_pe_contact)
forms["pr_pe_contact2"] = SQLFORM(db.pr_pe_contact)
forms["vol_credential"] = SQLFORM(db.vol_credential)
forms["vol_volunteer"] = SQLFORM(db.vol_volunteer)
form_rows = []
required = SPAN(" *", _class="req")
for field in fields:
tablename = field["tablename"]
fieldname = field["fieldname"]
# Label
try:
label = "%s:" % field["label"]
except:
label = "%s:" % db[tablename][fieldname].label
try:
if field["required"]:
label = DIV(label, required)
except:
pass
label = TD(label, _class="w2p_fl")
# Widget
try:
if field["formfieldname"] == "telephone":
widget = forms["%s1" % tablename].custom.widget[fieldname]
elif field["formfieldname"] == "email":
widget = forms["%s2" % tablename].custom.widget[fieldname]
widget.attributes["_id"] = field["formfieldname"]
widget.attributes["_name"] = field["formfieldname"]
except:
widget = forms[tablename].custom.widget[fieldname]
# Comment
try:
comment = field["comment"]
except:
comment = db[tablename][fieldname].comment or ""
form_rows.append(TR(label))
form_rows.append(TR(widget, comment))
form = FORM(TABLE(*form_rows),
INPUT(_value = T("Save"),
_type = "submit"))
if form.accepts(request.vars, session):
# Insert Person Record
person_id = db.pr_person.insert(first_name=request.vars.first_name,
last_name=request.vars.last_name)
# Update Super-Entity
record = Storage(id=person_id)
s3xrc.model.update_super(db.pr_person, record)
# Register as Volunteer
# @ToDo: Handle Available Times (which needs reworking anyway)
db.vol_volunteer.insert(person_id=person_id,
location_id=request.vars.location_id,
status=1) # Active
# Insert Credential
db.vol_credential.insert(person_id=person_id,
skill_id=request.vars.skill_id,
status=1) # Pending
pe_id = db(db.pr_person.id == person_id).select(db.pr_person.pe_id, limitby=(0, 1)).first().pe_id
# Insert Email
db.pr_pe_contact.insert(pe_id=pe_id, contact_method=1, value=request.vars.email)
# Insert Telephone
db.pr_pe_contact.insert(pe_id=pe_id, contact_method=2, value=request.vars.telephone)
        response.confirmation = T("Sign-up successful - you should hear from us soon!")
return dict(form=form)
# -----------------------------------------------------------------------------
def person():
"""
This controller produces either generic person component tabs or
volunteer-specific person component tabs, depending on whether "vol_tabs"
in the URL's vars is "person" or "volunteer".
"""
# Override prefix
_prefix = "pr"
# Choose table
tablename = "%s_%s" % (_prefix, resourcename)
table = db[tablename]
# Configure redirection and list fields
register_url = str(URL(r=request, f=resourcename,
args=["[id]", "volunteer"],
vars={"vol_tabs":1}))
s3xrc.model.configure(table,
create_next=register_url)
tab_set = "person"
if "vol_tabs" in request.vars:
tab_set = request.vars["vol_tabs"]
if tab_set == "person":
#table.pr_impact_tags.readable=False
table.missing.default = False
tabs = [(T("Basic Details"), None),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "pe_contact"),
(T("Presence Log"), "presence")]
else:
db.pr_group_membership.group_id.label = T("Team Id")
db.pr_group_membership.group_head.label = T("Team Leader")
s3xrc.model.configure(db.pr_group_membership,
list_fields=["id",
"group_id",
"group_head",
"description"])
tabs = [
(T("Availability"), "volunteer"),
(T("Teams"), "group_membership"),
(T("Skills"), "credential"),
]
# Pre-process
def prep(r):
if r.representation in s3.interactive_view_formats:
# CRUD strings
ADD_VOL = T("Add Volunteer")
LIST_VOLS = T("List Volunteers")
s3.crud_strings[tablename] = Storage(
title_create = T("Add a Volunteer"),
title_display = T("Volunteer Details"),
title_list = LIST_VOLS,
title_update = T("Edit Volunteer Details"),
title_search = T("Search Volunteers"),
subtitle_create = ADD_VOL,
subtitle_list = T("Volunteers"),
label_list_button = LIST_VOLS,
label_create_button = ADD_VOL,
label_delete_button = T("Delete Volunteer"),
msg_record_created = T("Volunteer added"),
msg_record_modified = T("Volunteer details updated"),
msg_record_deleted = T("Volunteer deleted"),
msg_list_empty = T("No Volunteers currently registered"))
if r.component:
# Allow users to be registered as volunteers
if r.component.name == "presence":
db.pr_presence.presence_condition.default = vita.CONFIRMED
db.pr_presence.presence_condition.readable = False
db.pr_presence.presence_condition.writable = False
db.pr_presence.orig_id.readable = False
db.pr_presence.orig_id.writable = False
db.pr_presence.dest_id.readable = False
db.pr_presence.dest_id.writable = False
db.pr_presence.proc_desc.readable = False
db.pr_presence.proc_desc.writable = False
else:
# Only display active volunteers
response.s3.filter = (table.id == db.vol_volunteer.person_id) & (db.vol_volunteer.status == 1)
return True
response.s3.prep = prep
output = s3_rest_controller(_prefix, resourcename,
rheader=lambda r: shn_pr_rheader(r, tabs))
shn_menu()
return output
# -----------------------------------------------------------------------------
# Skills
# -----------------------------------------------------------------------------
def skill():
"""
RESTful CRUD Controller
Lookup list of skill types
"""
return s3_rest_controller(prefix, "skill")
# -----------------------------------------------------------------------------
def credential():
"""
RESTful CRUD Controller
Select skills a volunteer has & validate them
"""
return s3_rest_controller(prefix, "skill_types")
# -----------------------------------------------------------------------------
def skillSearch():
"""
Search for Volunteers by Skill
- A Notification is sent to each matching volunteer
@ToDo: Make into a normal S3Search? (may need minor modification)
@ToDo: Make the Notification into a separate button (may want to search without notifications)
"""
from gluon.sqlhtml import CheckboxesWidget
vol_skill_widget = CheckboxesWidget().widget(db.vol_credential.skill_id, None)
search_btn = INPUT(_value = "search", _type = "submit")
search_form = FORM(vol_skill_widget, search_btn)
output = dict(search_form = search_form)
output["table"] = ""
if search_form.accepts(request.vars, session, keepvalues=True):
search_skill_ids = request.vars.skill_id
table1 = db.vol_credential
table2 = db.vol_skill
table3 = db.pr_person
#person_details = []
        # Print a list of volunteers with their skill status.
        # @ToDo: selects for only one skill right now; add display of the skill name
vol_id = db((table2.id == table1.skill_id) & \
(table2.id == search_skill_ids)).select(table1.person_id)
vol_idset = []
html = DIV(DIV(B(T("List of Volunteers for this skill set"))))
for id in vol_id:
vol_idset.append(id.person_id)
for pe_id in vol_idset:
person_details = db((table3.id == pe_id)).select(table3.first_name, table3.middle_name, table3.last_name).first()
skillset = db(table1.person_id == pe_id).select(table1.status).first()
html.append(DIV(LABEL(vita.fullname(person_details)),DIV(T("Skill Status") + ": "), UL(skillset.status)))
# @ToDo: Make the notification message configurable
#msg.send_by_pe_id(pe_id, "CERT: Please Report for Duty", "We ask you to report for duty if you are available", 1, 1)
html.append(DIV(B(T("Volunteers were notified!"))))
#for one_pr in person_details:
#skillset = "approved"
#html += DIV(LABEL(vita.fullname(one_pr)),DIV(T("Skill Status") + ": "), UL(skillset), _id="table-container")
#person_data="<div>%s</div>" % str(person_details)
html2 = DIV(html, _id="table-container")
output["table"] = html2
return output
# -----------------------------------------------------------------------------
# Teams
# -----------------------------------------------------------------------------
def group():
"""
Team controller
- uses the group table from PR
"""
tablename = "pr_group"
table = db[tablename]
table.group_type.label = T("Team Type")
table.description.label = T("Team Description")
table.name.label = T("Team Name")
db.pr_group_membership.group_id.label = T("Team Id")
db.pr_group_membership.group_head.label = T("Team Leader")
# Set Defaults
db.pr_group.group_type.default = 3 # 'Relief Team'
db.pr_group.group_type.readable = db.pr_group.group_type.writable = False
# CRUD Strings
ADD_TEAM = T("Add Team")
LIST_TEAMS = T("List Teams")
s3.crud_strings[tablename] = Storage(
title_create = ADD_TEAM,
title_display = T("Team Details"),
title_list = LIST_TEAMS,
title_update = T("Edit Team"),
title_search = T("Search Teams"),
subtitle_create = T("Add New Team"),
subtitle_list = T("Teams"),
label_list_button = LIST_TEAMS,
label_create_button = ADD_TEAM,
label_search_button = T("Search Teams"),
msg_record_created = T("Team added"),
msg_record_modified = T("Team updated"),
msg_record_deleted = T("Team deleted"),
msg_list_empty = T("No Items currently registered"))
s3.crud_strings["pr_group_membership"] = Storage(
title_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
title_search = T("Search Member"),
subtitle_create = T("Add New Member"),
subtitle_list = T("Current Team Members"),
label_list_button = T("List Members"),
label_create_button = T("Add Group Member"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Team Member added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Members currently registered"))
response.s3.filter = (db.pr_group.system == False) # do not show system groups
# Redirect to member list when a new group has been created
s3xrc.model.configure(db.pr_group,
create_next = URL(r=request, c="vol", f="group", args=["[id]", "group_membership"]))
s3xrc.model.configure(db.pr_group_membership,
list_fields=["id",
"person_id",
"group_head",
"description"])
s3xrc.model.configure(table, main="name", extra="description")
output = s3_rest_controller("pr", "group",
rheader=lambda jr: shn_pr_rheader(jr,
tabs = [(T("Team Details"), None),
(T("Address"), "address"),
(T("Contact Data"), "pe_contact"),
(T("Members"), "group_membership")]))
shn_menu()
return output
# -----------------------------------------------------------------------------
# Projects & Tasks
# -----------------------------------------------------------------------------
def project():
""" RESTful CRUD controller """
tabs = [
(T("Basic Details"), None),
#(T("Staff"), "staff"),
(T("Tasks"), "task"),
#(T("Donors"), "organisation"),
#(T("Sites"), "site"), # Ticket 195
]
rheader = lambda r: shn_project_rheader(r, tabs)
return s3_rest_controller("project", resourcename, rheader=rheader)
# -----------------------------------------------------------------------------
def task():
""" Manage current user's tasks """
tablename = "project_%s" % (resourcename)
table = db[tablename]
my_person_id = s3_logged_in_person()
if not my_person_id:
session.error = T("No person record found for current user.")
redirect(URL(r=request, f="index"))
table.person_id.default = my_person_id
#@ToDo: if not a team leader then:
# can only assign themselves tasks
response.s3.filter = (db.project_task.person_id == my_person_id)
s3.crud_strings[tablename].title_list = T("My Tasks")
s3.crud_strings[tablename].subtitle_list = T("Task List")
return s3_rest_controller("project", resourcename)
# -----------------------------------------------------------------------------
# Maps
# -----------------------------------------------------------------------------
def view_map():
"""
Show Location of a Volunteer on the Map
Use most recent presence if available, else any address that's available.
@ToDo: Convert to a custom method of the person resource
"""
person_id = request.args(0)
# Shortcuts
persons = db.pr_person
presences = db.pr_presence
locations = db.gis_location
# Include the person's last verified location, assuming that they're not missing
presence_query = (persons.id == person_id) & \
(persons.missing == False) & \
(presences.pe_id == persons.pe_id) & \
(presences.presence_condition.belongs(vita.PERSISTANT_PRESENCE)) & \
(presences.closed == False) & \
(locations.id == presences.location_id)
# Need sql.Rows object for show_map, so don't extract individual row yet.
features = db(presence_query).select(locations.id,
locations.lat,
locations.lon,
persons.id,
limitby=(0, 1))
if not features:
# Use their Address
address_query = (persons.id == person_id) & \
(db.pr_address.pe_id == persons.pe_id) & \
(locations.id == db.pr_address.location_id)
# @ToDo: Lookup their schedule to see whether they should be at Work, Home or Holiday & lookup the correct address
# For now, take whichever address is supplied first.
features = db(address_query).select(locations.id,
locations.lat,
locations.lon,
persons.id,
limitby=(0, 1))
if features:
# Center and zoom the map.
record = features.first()
lat = record.gis_location.lat
lon = record.gis_location.lon
zoom = 15
config = gis.get_config()
if not deployment_settings.get_security_map() or s3_has_role("MapAdmin"):
catalogue_toolbar = True
else:
catalogue_toolbar = False
# Standard Feature Layers
feature_queries = []
feature_layers = db(db.gis_layer_feature.enabled == True).select()
for layer in feature_layers:
_layer = gis.get_feature_layer(layer.module, layer.resource, layer.name, layer.popup_label, config=config, marker_id=layer.marker_id, active=False, polygons=layer.polygons)
if _layer:
feature_queries.append(_layer)
# Add the Volunteer layer
try:
marker_id = db(db.gis_marker.name == "volunteer").select().first().id
except:
marker_id = 1
# Can't use this since the location_id link is via pr_presence not pr_person
#_layer = gis.get_feature_layer("pr", "person", "Volunteer", "Volunteer", config=config, marker_id=marker_id, active=True, polygons=False)
#if _layer:
# feature_queries.append(_layer)
# Insert the name into the query & replace the location_id with the person_id
for i in range(0, len(features)):
features[i].gis_location.name = vita.fullname(db(db.pr_person.id == features[i].pr_person.id).select(limitby=(0, 1)).first())
features[i].gis_location.id = features[i].pr_person.id
feature_queries.append({"name" : "Volunteer",
"query" : features,
"active" : True,
"popup_label" : "Volunteer",
"popup_url" : URL(r=request, c="vol", f="popup") + "/<id>/read.plain",
"marker" : marker_id})
html = gis.show_map(
feature_queries = feature_queries,
catalogue_toolbar = catalogue_toolbar,
catalogue_overlays = True,
toolbar = True,
search = True,
lat = lat,
lon = lon,
zoom = zoom,
window = False # We should provide a button within the map to make it go full-screen (ideally without reloading the page!)
)
response.view = "vol/view_map.html"
return dict(map=html)
# Redirect to person details if no location is available
session.warning = T("No location known for this person")
redirect(URL(r=request, c="vol", f="person", args=[person_id, "presence"]))
def popup():
"""
Controller that returns a person's data
To be used to populate map popup
"""
person_id = request.args(0)
vol_query = (db.pr_person.id == person_id)
vol = db(vol_query).select(db.pr_person.first_name, db.pr_person.middle_name, db.pr_person.last_name, limitby=(0, 1)).first()
skill_query = (db.vol_skill.person_id == person_id) & (db.vol_skill.skill_types_id == db.vol_skill_types.id)
skills = db(skill_query).select(db.vol_skill_types.name)
skillset = []
for s in skills:
skillset.append(s.name)
if len(skillset) == 0:
skillset.append(T("n/a"))
html = DIV(LABEL(vita.fullname(vol)), DIV(T("Skills") + ": "), UL(skillset), _id="table-container")
return dict(html=html)
# -----------------------------------------------------------------------------
def view_team_map():
"""
Show Locations of a Team of Volunteers on the Map
Use most recent presence for each if available
"""
# @ToDo: Convert to a custom method of the group resource
# Currently all presence records created in vol have condition set to
# confirmed (see person controller's prep). Then we ignore records that
# are not confirmed. This does not guarantee that only vol-specific
# records are used, but if other modules use confirmed to mean the
# presence record is valid, that is probably acceptable. @ToDo: Could
# we make use of some of the other presence conditions, like transit and
# check-in/out? @ToDo: Is it proper to exclude conditions like missing?
# What if the team manager wants to know what happened to their volunteers?
# Could indicate status, e.g., by marker color or in popup.
group_id = request.args(0)
# Get a list of team (group) member ids.
members_query = (db.pr_group_membership.group_id == group_id)
members = db(members_query).select(db.pr_group_membership.person_id)
member_person_ids = [ x.person_id for x in members ]
# Presence data of the members with Presence Logs:
# Get only valid presence data for each person. Here, valid means
# not closed (a closed presence has been explicitly marked no longer
# valid) and the presence condition is confirmed (all presences made
# in the vol module are set to confirmed). Also exclude missing
# persons. See @ToDo re. possible alternate uses of condition.
# Note the explicit tests against False are due to a Web2py issue:
# Use of unary negation leads to a syntax error in the generated SQL.
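    # (concretely, the filters below spell out (field == False) comparisons instead of
    # negating a query with ~, which this web2py version turns into invalid SQL)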
presence_rows = db(
db.pr_person.id.belongs(member_person_ids) &
(db.pr_person.missing == False) &
(db.pr_presence.pe_id == db.pr_person.pe_id) &
db.pr_presence.presence_condition.belongs(vita.PERSISTANT_PRESENCE) &
(db.pr_presence.closed == False) &
(db.gis_location.id == db.pr_presence.location_id)).select(
db.gis_location.ALL,
db.pr_person.id,
db.pr_person.first_name,
db.pr_person.middle_name,
db.pr_person.last_name,
orderby=~db.pr_presence.datetime)
# Get latest presence data for each person.
# Note sort is stable, so preserves time order.
person_location_sort = presence_rows.sort(lambda row:row.pr_person.id)
previous_person_id = None
features = []
for row in person_location_sort:
if row.pr_person.id != previous_person_id:
features.append(row)
member_person_ids.remove(row.pr_person.id)
previous_person_id = row.pr_person.id
# Get addresses of those members without presence data.
address_rows = db(
db.pr_person.id.belongs(member_person_ids) &
(db.pr_address.pe_id == db.pr_person.pe_id) &
(db.gis_location.id == db.pr_address.location_id)).select(
db.gis_location.ALL,
db.pr_person.id,
db.pr_person.first_name,
db.pr_person.middle_name,
db.pr_person.last_name)
features.extend(address_rows)
if features:
config = gis.get_config()
catalogue_toolbar = not deployment_settings.get_security_map() or s3_has_role("MapAdmin")
# Standard Feature Layers
feature_queries = []
feature_layers = db(db.gis_layer_feature.enabled == True).select()
for layer in feature_layers:
_layer = gis.get_feature_layer(layer.module,
layer.resource,
layer.name,
layer.popup_label,
config=config,
marker_id=layer.marker_id,
active=False,
polygons=layer.polygons)
if _layer:
feature_queries.append(_layer)
# Add the Volunteer layer
try:
marker_id = db(db.gis_marker.name == "volunteer").select().first().id
except:
# @ToDo Why not fall back to the person marker?
marker_id = 1
# Insert the name into the query & replace the location_id with the
# person_id.
for feature in features:
names = Storage(first_name = feature.pr_person.first_name,
middle_name = feature.pr_person.middle_name,
last_name = feature.pr_person.last_name)
feature.gis_location.name = vita.fullname(names)
feature.gis_location.id = feature.pr_person.id
feature_queries.append({"name" : "Volunteers",
"query" : features,
"active" : True,
"popup_label" : "Volunteer",
"popup_url" : URL(r=request, c="vol", f="popup") + "/<id>/read.plain",
"marker" : marker_id})
bounds = gis.get_bounds(features=features)
html = gis.show_map(
feature_queries = feature_queries,
catalogue_toolbar = catalogue_toolbar,
catalogue_overlays = True,
toolbar = True,
search = True,
bbox = bounds,
window = True) # @ToDo: Change to False & create a way to convert an embedded map to a full-screen one without a screen refresh
response.view = "vol/view_map.html"
return dict(map=html)
# Redirect to team member list if no locations are available.
session.warning = T("No locations found for members of this team")
redirect(URL(r=request, c="vol", f="group",
args=[group_id, "group_membership"]))
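# The de-duplication loop above relies on Python's stable sort: presence rows
# arrive newest-first, and a stable sort by person id keeps that order within
# each person, so the first row seen per id is that person's latest presence.
# A minimal standalone sketch of the same idiom, over hypothetical plain dicts
# rather than Web2py Rows:
def _latest_presence_per_person(rows):
    """rows: dicts with "person_id" and "datetime" keys, ordered newest first."""
    rows = sorted(rows, key=lambda row: row["person_id"])  # stable sort
    latest = []
    seen = set()
    for row in rows:
        if row["person_id"] not in seen:
            # First (i.e. most recent) row for this person
            latest.append(row)
            seen.add(row["person_id"])
    return latest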
# -----------------------------------------------------------------------------
def view_project_map():
"""
Show Location of all Tasks on the Map
@ToDo: Different Colours for Status
Green for Complete
Red for Urgent/Incomplete
Amber for Non-Urgent/Incomplete
@ToDo: A single map with both Tasks & Volunteers displayed on it
@ToDo: Convert to a custom method of the project resource
"""
project_id = request.args(0)
# Shortcuts
tasks = db.project_task
locations = db.gis_location
features = db((tasks.project_id == project_id) & \
(locations.id == tasks.location_id)).select(locations.id,
locations.lat,
locations.lon,
locations.lat_min,
locations.lat_max,
locations.lon_min,
locations.lon_max,
tasks.subject,
tasks.status,
tasks.urgent,
tasks.id)
if features:
if len(features) > 1:
# Set the viewport to the appropriate area to see all the tasks
bounds = gis.get_bounds(features=features)
else:
# A 1-task bounds zooms in too far for many tilesets
lat = features.first().gis_location.lat
lon = features.first().gis_location.lon
zoom = 15
config = gis.get_config()
if not deployment_settings.get_security_map() or s3_has_role("MapAdmin"):
catalogue_toolbar = True
else:
catalogue_toolbar = False
# Standard Feature Layers
feature_queries = []
feature_layers = db(db.gis_layer_feature.enabled == True).select()
for layer in feature_layers:
_layer = gis.get_feature_layer(layer.module, layer.resource, layer.name, layer.popup_label, config=config, marker_id=layer.marker_id, active=False, polygons=layer.polygons)
if _layer:
feature_queries.append(_layer)
# Add the Tasks layer
# Can't use this since we want to use different colours, not markers
#_layer = gis.get_feature_layer("project", "task", "Tasks", "Task", config=config, marker_id=marker_id, active=True, polygons=False)
#if _layer:
# feature_queries.append(_layer)
# Insert the name into the query & replace the location_id with the task_id
for i in range(0, len(features)):
features[i].gis_location.name = features[i].project_task.subject
features[i].gis_location.id = features[i].project_task.id
features[i].gis_location.shape = "circle"
if features[i].project_task.status in [3, 4, 6]:
# Green for 'Completed', 'Postponed' or 'Cancelled'
features[i].gis_location.color = "green"
elif features[i].project_task.status == 1 and features[i].project_task.urgent == True:
# Red for 'Urgent' and 'New' (i.e. Unassigned)
features[i].gis_location.color = "red"
else:
# Amber for 'Feedback' or 'non-urgent'
features[i].gis_location.color = " #FFBF00"
feature_queries.append({
"name" : "Tasks",
"query" : features,
"active" : True,
"popup_label" : "Task",
"popup_url" : URL(r=request, c="project", f="task") + "/<id>/read.plain"
})
try:
# bbox
html = gis.show_map(
feature_queries = feature_queries,
catalogue_toolbar = catalogue_toolbar,
catalogue_overlays = True,
toolbar = True,
search = True,
bbox = bounds,
window = True, # @ToDo Change to False & create a way to convert an embedded map to a full-screen one without a screen refresh
)
except:
# lat/lon/zoom
html = gis.show_map(
feature_queries = feature_queries,
catalogue_toolbar = catalogue_toolbar,
catalogue_overlays = True,
toolbar = True,
search = True,
lat = lat,
lon = lon,
zoom = zoom,
window = True, # @ToDo Change to False & create a way to convert an embedded map to a full-screen one without a screen refresh
)
response.view = "vol/view_map.html"
return dict(map=html)
# Redirect to tasks if no task location is available
session.warning = T("No Tasks with Location Data")
redirect(URL(r=request, c="vol", f="project", args=[project_id, "task"]))
# -----------------------------------------------------------------------------
def view_offices_map():
"""
Show Location of all Offices on the Map
- optionally filter by those within a radius of a specific Event (Project)
"""
project_id = None
radius = None
if "project_id" in request.vars:
project_id = request.vars.project_id
if "radius" in request.vars:
radius = request.vars.radius
# Shortcuts
projects = db.project_project
offices = db.org_office
locations = db.gis_location
if project_id and radius:
# @ToDo: Optimise by doing a single SQL query with the Spatial one
project_locations = db((projects.id == project_id) & (locations.id == projects.location_id)).select(locations.id,
locations.lat,
locations.lon,
locations.lat_min,
locations.lat_max,
locations.lon_min,
locations.lon_max,
projects.code,
projects.id,
limitby=(0, 1))
project_location = project_locations.first()
lat = project_location.gis_location.lat
lon = project_location.gis_location.lon
if (lat is None) or (lon is None):
# Zero is allowed
session.error = T("Project has no Lat/Lon")
redirect(URL(r=request, c="vol", f="project", args=[project_id]))
# Perform the Spatial query
features = gis.get_features_in_radius(lat, lon, radius, tablename="org_office")
# @ToDo: we also want the Project to show (with different Icon): project_locations set ready
else:
features = db((offices.id > 0) & \
(locations.id == offices.location_id)).select(locations.id,
locations.lat,
locations.lon,
locations.lat_min,
locations.lat_max,
locations.lon_min,
locations.lon_max,
offices.name,
offices.id)
if features:
if len(features) > 1:
# Set the viewport to the appropriate area to see all the offices
bounds = gis.get_bounds(features=features)
else:
# A single-office bounds zooms in too far for many tilesets
lat = features[0].gis_location.lat
lon = features[0].gis_location.lon
zoom = 15
config = gis.get_config()
if not deployment_settings.get_security_map() or s3_has_role("MapAdmin"):
catalogue_toolbar = True
else:
catalogue_toolbar = False
# Standard Feature Layers
feature_queries = []
feature_layers = db(db.gis_layer_feature.enabled == True).select()
for layer in feature_layers:
_layer = gis.get_feature_layer(layer.module, layer.resource, layer.name, layer.popup_label, config=config, marker_id=layer.marker_id, active=False, polygons=layer.polygons)
if _layer:
feature_queries.append(_layer)
# Add the Offices layer
# Can't use this since we may have a custom spatial query
#_layer = gis.get_feature_layer("org", "office", "Offices", "Office", config=config, marker_id=marker_id, active=True, polygons=False)
#if _layer:
# feature_queries.append(_layer)
try:
office_marker_id = db(db.gis_marker.name == "office").select().first().id
except:
office_marker_id = 1
# Insert the name into the query & replace the location_id with the office_id
for i in range(0, len(features)):
features[i].gis_location.name = features[i].org_office.name
features[i].gis_location.id = features[i].org_office.id
# If a Project
# features[i].gis_location.shape = "circle"
# if features[i].project_task.status in [3, 4, 6]:
# # Green for 'Completed', 'Postponed' or 'Cancelled'
# features[i].gis_location.color = "green"
# elif features[i].project_task.status == 1 and features[i].project_task.urgent == True:
# # Red for 'Urgent' and 'New' (i.e. Unassigned)
# features[i].gis_location.color = "red"
# else:
# # Amber for 'Feedback' or 'non-urgent'
# features[i].gis_location.color = " #FFBF00"
feature_queries.append({
"name" : "Tasks",
"query" : features,
"active" : True,
"popup_label" : "Task",
"popup_url" : URL(r=request, c="org", f="office") + "/<id>/read.plain",
"marker" : office_marker_id
})
try:
# Are we using bbox?
html = gis.show_map(
feature_queries = feature_queries,
catalogue_toolbar = catalogue_toolbar,
catalogue_overlays = True,
toolbar = True,
search = True,
bbox = bounds,
window = True, # @ToDo: Change to False & create a way to convert an embedded map to a full-screen one without a screen refresh
)
except:
# No: Lat/Lon/Zoom
html = gis.show_map(
feature_queries = feature_queries,
catalogue_toolbar = catalogue_toolbar,
catalogue_overlays = True,
toolbar = True,
search = True,
lat = lat,
lon = lon,
zoom = zoom,
window = True, # @ToDo: Change to False & create a way to convert an embedded map to a full-screen one without a screen refresh
)
response.view = "vol/view_map.html"
return dict(map=html)
else:
# Redirect to offices if none found
session.error = T("No Offices found!")
redirect(URL(r=request, c="org", f="office"))
# -----------------------------------------------------------------------------
# Messaging
# -----------------------------------------------------------------------------
def compose_person():
""" Send message to volunteer """
person_pe_id_query = (db.pr_person.id == request.vars.person_id)
pe_id_row = db(person_pe_id_query).select(db.pr_person.pe_id).first()
request.vars.pe_id = pe_id_row["pe_id"]
return shn_msg_compose(redirect_module=prefix,
redirect_function="compose_person",
redirect_vars={"person_id":request.vars.person_id},
title_name="Send a message to a volunteer")
# -----------------------------------------------------------------------------
def compose_group():
""" Send message to members of a team """
group_pe_id_query = (db.pr_group.id == request.vars.group_id)
pe_id_row = db(group_pe_id_query).select(db.pr_group.pe_id).first()
request.vars.pe_id = pe_id_row["pe_id"]
return shn_msg_compose(redirect_module=prefix,
redirect_function="compose_group",
redirect_vars={"group_id":request.vars.group_id},
title_name="Send a message to a team of volunteers")
# -----------------------------------------------------------------------------
|
ptressel/sahana-eden-madpub
|
controllers/vol.py
|
Python
|
mit
| 48,999
|
[
"Amber"
] |
9c933ddae0f05a397a67aa8eb736dfb5898850bde517b4b0da8caed3d9582448
|
../../../../../../../share/pyshared/orca/scripts/apps/soffice/formatting.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/soffice/formatting.py
|
Python
|
gpl-3.0
| 75
|
[
"ORCA"
] |
2faabe0d317d859ecf91901406b3c44f1f3779a1cffe23ba61eef8341813fa38
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import os
import six
from taskflow.utils import iter_utils
from taskflow.utils import misc
class FrozenNode(Exception):
"""Exception raised when a frozen node is modified."""
def __init__(self):
super(FrozenNode, self).__init__("Frozen node(s) can't be modified")
class _DFSIter(object):
"""Depth first iterator (non-recursive) over the child nodes."""
def __init__(self, root, include_self=False):
self.root = root
self.include_self = bool(include_self)
def __iter__(self):
stack = []
if self.include_self:
stack.append(self.root)
else:
stack.extend(self.root.reverse_iter())
while stack:
node = stack.pop()
# Visit the node.
yield node
# Traverse the left & right subtree.
stack.extend(node.reverse_iter())
class _BFSIter(object):
"""Breadth first iterator (non-recursive) over the child nodes."""
def __init__(self, root, include_self=False):
self.root = root
self.include_self = bool(include_self)
def __iter__(self):
q = collections.deque()
if self.include_self:
q.append(self.root)
else:
q.extend(self.root.reverse_iter())
while q:
node = q.popleft()
# Visit the node.
yield node
# Traverse the left & right subtree.
q.extend(node.reverse_iter())
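# A minimal standalone illustration of the traversal technique the two
# iterator classes above implement: depth-first with a LIFO stack,
# breadth-first with a FIFO deque (reusing the module-level collections
# import). The nested-dict "tree" shape here is hypothetical.
def _dfs_names(tree):
    stack = [tree]
    while stack:
        node = stack.pop()
        yield node["name"]
        # Reversed so the first child is visited first.
        stack.extend(reversed(node.get("children", [])))

def _bfs_names(tree):
    queue = collections.deque([tree])
    while queue:
        node = queue.popleft()
        yield node["name"]
        queue.extend(node.get("children", []))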
class Node(object):
"""A n-ary node class that can be used to create tree structures."""
#: Default string prefix used in :py:meth:`.pformat`.
STARTING_PREFIX = ""
#: Default string used to create empty space used in :py:meth:`.pformat`.
EMPTY_SPACE_SEP = " "
HORIZONTAL_CONN = "__"
"""
Default string used to horizontally connect a node to its
parent (used in :py:meth:`.pformat`.).
"""
VERTICAL_CONN = "|"
"""
Default string used to vertically connect a node to its
parent (used in :py:meth:`.pformat`).
"""
#: Default line separator used in :py:meth:`.pformat`.
LINE_SEP = os.linesep
def __init__(self, item, **kwargs):
self.item = item
self.parent = None
self.metadata = dict(kwargs)
self.frozen = False
self._children = []
def freeze(self):
if not self.frozen:
# This will DFS until all children are frozen as well; only
# after that succeeds do we freeze ourselves (this makes it so
# that we don't become frozen if a child node fails to perform
# the freeze operation).
for n in self:
n.freeze()
self.frozen = True
@misc.disallow_when_frozen(FrozenNode)
def add(self, child):
"""Adds a child to this node (appends to left of existing children).
NOTE(harlowja): this will also set the child's parent to be this node.
"""
child.parent = self
self._children.append(child)
def empty(self):
"""Returns if the node is a leaf node."""
return self.child_count() == 0
def path_iter(self, include_self=True):
"""Yields back the path from this node to the root node."""
if include_self:
node = self
else:
node = self.parent
while node is not None:
yield node
node = node.parent
def find_first_match(self, matcher, only_direct=False, include_self=True):
"""Finds the *first* node that matching callback returns true.
This will search not only this node but also any children nodes (in
depth first order, from right to left) and finally if nothing is
matched then ``None`` is returned instead of a node object.
:param matcher: callback that takes one positional argument (a node)
and returns true if it matches desired node or false
if not.
:param only_direct: only look at current node and its
direct children (implies that this does not
search using depth first).
:param include_self: include the current node during searching.
:returns: the node that matched (or ``None``)
"""
if only_direct:
if include_self:
it = itertools.chain([self], self.reverse_iter())
else:
it = self.reverse_iter()
else:
it = self.dfs_iter(include_self=include_self)
return iter_utils.find_first_match(it, matcher)
def find(self, item, only_direct=False, include_self=True):
"""Returns the *first* node for an item if it exists in this node.
This will search not only this node but also any child nodes (in
depth first order, from right to left); if nothing is matched then
``None`` is returned instead of a node object.
:param item: item to look for.
:param only_direct: only look at current node and its
direct children (implies that this does not
search using depth first).
:param include_self: include the current node during searching.
:returns: the node that matched provided item (or ``None``)
"""
return self.find_first_match(lambda n: n.item == item,
only_direct=only_direct,
include_self=include_self)
def disassociate(self):
"""Removes this node from its parent (if any).
:returns: occurrences of this node that were removed from its parent.
"""
occurrences = 0
if self.parent is not None:
p = self.parent
self.parent = None
# Remove all instances of this node from its parent.
while True:
try:
p._children.remove(self)
except ValueError:
break
else:
occurrences += 1
return occurrences
def remove(self, item, only_direct=False, include_self=True):
"""Removes a item from this nodes children.
This will search not only this node but also any children nodes and
finally if nothing is found then a value error is raised instead of
the normally returned *removed* node object.
:param item: item to lookup.
:param only_direct: only look at current node and its
direct children (implies that this does not
search using depth first).
:param include_self: include the current node during searching.
"""
node = self.find(item, only_direct=only_direct,
include_self=include_self)
if node is None:
raise ValueError("Item '%s' not found to remove" % item)
else:
node.disassociate()
return node
def __contains__(self, item):
"""Returns whether item exists in this node or this nodes children.
:returns: if the item exists in this node or nodes children,
true if the item exists, false otherwise
:rtype: boolean
"""
return self.find(item) is not None
def __getitem__(self, index):
# NOTE(harlowja): 0 is the right most index, len - 1 is the left most
return self._children[index]
def pformat(self, stringify_node=None,
linesep=LINE_SEP, vertical_conn=VERTICAL_CONN,
horizontal_conn=HORIZONTAL_CONN, empty_space=EMPTY_SPACE_SEP,
starting_prefix=STARTING_PREFIX):
"""Formats this node + children into a nice string representation.
**Example**::
>>> from taskflow.types import tree
>>> yahoo = tree.Node("CEO")
>>> yahoo.add(tree.Node("Infra"))
>>> yahoo[0].add(tree.Node("Boss"))
>>> yahoo[0][0].add(tree.Node("Me"))
>>> yahoo.add(tree.Node("Mobile"))
>>> yahoo.add(tree.Node("Mail"))
>>> print(yahoo.pformat())
CEO
|__Infra
| |__Boss
| |__Me
|__Mobile
|__Mail
"""
if stringify_node is None:
# Default to making a unicode string out of the nodes item...
stringify_node = lambda node: six.text_type(node.item)
expected_lines = self.child_count(only_direct=False) + 1
buff = six.StringIO()
conn = vertical_conn + horizontal_conn
stop_at_parent = self
for i, node in enumerate(self.dfs_iter(include_self=True), 1):
prefix = []
connected_to_parent = False
last_node = node
# Walk through *most* of this node's parents, and form the expected
# prefix that each parent should require, repeat this until we
# hit the root node (self) and use that as our node's prefix
# string...
parent_node_it = iter_utils.while_is_not(
node.path_iter(include_self=True), stop_at_parent)
for j, parent_node in enumerate(parent_node_it):
if parent_node is stop_at_parent:
if j > 0:
if not connected_to_parent:
prefix.append(conn)
connected_to_parent = True
else:
# If the node was connected already then it must
# have had more than one parent, so we want to put
# the right final starting prefix on (which may be
# an empty space or another vertical connector)...
last_node = self._children[-1]
m = last_node.find_first_match(lambda n: n is node,
include_self=False,
only_direct=False)
if m is not None:
prefix.append(empty_space)
else:
prefix.append(vertical_conn)
elif parent_node is node:
# Skip ourself... (we only include ourself so that
# we can use the 'j' variable to determine if the only
# node requested is ourself in the first place); used
# in the first conditional here...
pass
else:
if not connected_to_parent:
prefix.append(conn)
spaces = len(horizontal_conn)
connected_to_parent = True
else:
# If we have already been connected to our parent
# then determine if this current node is the last
# node of its parent (and in that case just put
# on more spaces), otherwise put a vertical connector
# on and less spaces...
if parent_node[-1] is not last_node:
prefix.append(vertical_conn)
spaces = len(horizontal_conn)
else:
spaces = len(conn)
prefix.append(empty_space * spaces)
last_node = parent_node
prefix.append(starting_prefix)
for prefix_piece in reversed(prefix):
buff.write(prefix_piece)
buff.write(stringify_node(node))
if i != expected_lines:
buff.write(linesep)
return buff.getvalue()
def child_count(self, only_direct=True):
"""Returns how many children this node has.
This can be either only the direct children of this node or inclusive
of all child nodes of this node (children of children and so on).
NOTE(harlowja): it does not account for the current node in this count.
"""
if not only_direct:
return iter_utils.count(self.dfs_iter())
return len(self._children)
def __iter__(self):
"""Iterates over the direct children of this node (right->left)."""
for c in self._children:
yield c
def reverse_iter(self):
"""Iterates over the direct children of this node (left->right)."""
for c in reversed(self._children):
yield c
def index(self, item):
"""Finds the child index of a given item, searches in added order."""
index_at = None
for (i, child) in enumerate(self._children):
if child.item == item:
index_at = i
break
if index_at is None:
raise ValueError("%s is not contained in any child" % (item))
return index_at
def dfs_iter(self, include_self=False):
"""Depth first iteration (non-recursive) over the child nodes."""
return _DFSIter(self, include_self=include_self)
def bfs_iter(self, include_self=False):
"""Breadth first iteration (non-recursive) over the child nodes."""
return _BFSIter(self, include_self=include_self)
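# A small usage sketch of the Node API above, on a hypothetical three-node
# tree; it only exercises behaviour documented in the docstrings.
def _node_usage_sketch():
    root = Node("CEO")
    root.add(Node("Infra"))
    root[0].add(Node("Boss"))
    root.add(Node("Mail"))
    assert "Boss" in root                            # membership searches the subtree
    assert root.find("Mail").item == "Mail"          # lookup by item
    assert root.child_count() == 2                   # direct children only
    assert root.child_count(only_direct=False) == 3  # whole subtree
    # Breadth-first: direct children are yielded before grandchildren.
    return [n.item for n in root.bfs_iter(include_self=True)]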
|
junneyang/taskflow
|
taskflow/types/tree.py
|
Python
|
apache-2.0
| 14,198
|
[
"VisIt"
] |
3541f065a06b30f6ac9c3fd54214fa89edb2b57dc04f28eca61712f0b658f6e2
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to create objects to run tests with."""
__author__ = '[email protected] (Kevin Winter)'
from adspygoogle.common import Utils
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201206 as SERVER
from tests.adspygoogle.adwords import VERSION_V201206 as VERSION
def CreateTestCampaign(client):
"""Creates a CPC campaign to run tests with.
Args:
client: AdWordsClient client to obtain services from.
Returns:
int CampaignId
"""
campaign_service = client.GetCampaignService(SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'name': 'Campaign #%s' % Utils.GetUniqueName(),
'status': 'PAUSED',
'biddingStrategy': {
'type': 'ManualCPC'
},
'budget': {
'period': 'DAILY',
'amount': {
'microAmount': '10000000'
},
'deliveryMethod': 'STANDARD'
},
'settings': [
{
'xsi_type': 'KeywordMatchSetting',
'optIn': 'false'
}
]
}
}]
return campaign_service.Mutate(
operations)[0]['value'][0]['id']
def CreateTestRTBCampaign(client):
"""Creates a CPM campaign to run tests with.
Args:
client: AdWordsClient client to obtain services from.
Returns:
int CampaignId
"""
campaign_service = client.GetCampaignService(SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'name': 'Campaign #%s' % Utils.GetUniqueName(),
'status': 'PAUSED',
'biddingStrategy': {
'type': 'ManualCPM'
},
'budget': {
'period': 'DAILY',
'amount': {
'microAmount': '10000000'
},
'deliveryMethod': 'STANDARD'
},
'settings': [{
'xsi_type': 'RealTimeBiddingSetting',
'optIn': 'true'
}, {
'xsi_type': 'KeywordMatchSetting',
'optIn': 'false'
}]
}
}]
return campaign_service.Mutate(
operations)[0]['value'][0]['id']
def CreateTestAdGroup(client, campaign_id):
"""Creates a CPC AdGroup to run tests with.
Args:
client: AdWordsClient client to obtain services from.
campaign_id: int ID of a CPC Campaign.
Returns:
int AdGroupId
"""
ad_group_service = client.GetAdGroupService(SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'AdGroup #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'type': 'ManualCPCAdGroupBids',
'keywordMaxCpc': {
'amount': {
'microAmount': '1000000'
}
}
}
}
}]
ad_groups = ad_group_service.Mutate(operations)[0]['value']
return ad_groups[0]['id']
def CreateTestCPMAdGroup(client, campaign_id):
"""Creates a CPM AdGroup to run tests with.
Args:
client: AdWordsClient client to obtain services from.
campaign_id: int ID of a CPM Campaign.
Returns:
int AdGroupId
"""
ad_group_service = client.GetAdGroupService(SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'AdGroup #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'type': 'ManualCPMAdGroupBids',
'maxCpm': {
'amount': {
'microAmount': '1000000'
}
}
}
}
}]
ad_groups = ad_group_service.Mutate(operations)[0]['value']
return ad_groups[0]['id']
def CreateTestAd(client, ad_group_id):
"""Creates an Ad for running tests with.
Args:
client: AdWordsClient client to obtain services from.
ad_group_id: int ID of the AdGroup the Ad should belong to.
Returns:
int AdGroupAdId
"""
ad_group_ad_service = client.GetAdGroupAdService(SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
},
'status': 'ENABLED',
}
}]
ads = ad_group_ad_service.Mutate(operations)
return ads[0]['value'][0]['ad']['id']
def CreateTestKeyword(client, ad_group_id):
"""Creates a Keyword for running tests with.
Args:
client: AdWordsClient client to obtain services from.
ad_group_id: int ID of the AdGroup the Ad should belong to.
Returns:
int: KeywordId
"""
ad_group_criterion_service = client.GetAdGroupCriterionService(
SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
}
}
}]
criteria = ad_group_criterion_service.Mutate(operations)
return criteria[0]['value'][0]['criterion']['id']
def CreateTestPlacement(client, ad_group_id):
"""Creates a Placement for running tests with.
Args:
client: AdWordsClient client to obtain services from.
ad_group_id: int ID of the AdGroup the Ad should belong to.
Returns:
int: KeywordId
"""
ad_group_criterion_service = client.GetAdGroupCriterionService(
SERVER, VERSION, HTTP_PROXY)
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'url': 'http://mars.google.com'
},
}
}]
criteria = ad_group_criterion_service.Mutate(operations)
return criteria[0]['value'][0]['criterion']['id']
def CreateTestLocationExtension(client, campaign_id):
"""Creates a Location Extension for testing.
Args:
client: AdWordsClient client to obtain services from.
campaign_id: int ID of a CPC Campaign.
Returns:
int Location Extension ID
"""
geo_location_service = client.GetGeoLocationService(
SERVER, VERSION, HTTP_PROXY)
campaign_ad_extension_service = client.GetCampaignAdExtensionService(
SERVER, VERSION, HTTP_PROXY)
selector = {
'addresses': [
{
'streetAddress': '1600 Amphitheatre Parkway',
'cityName': 'Mountain View',
'provinceCode': 'US-CA',
'provinceName': 'California',
'postalCode': '94043',
'countryCode': 'US'
}
]
}
geo_locations = geo_location_service.Get(selector)
# Construct operations and add campaign ad extension.
operations = [
{
'operator': 'ADD',
'operand': {
'xsi_type': 'CampaignAdExtension',
'campaignId': campaign_id,
'adExtension': {
'xsi_type': 'LocationExtension',
'address': geo_locations[0]['address'],
'geoPoint': geo_locations[0]['geoPoint'],
'encodedLocation': geo_locations[0]['encodedLocation'],
'source': 'ADWORDS_FRONTEND'
}
}
}
]
ad_extensions = campaign_ad_extension_service.Mutate(operations)[0]
ad_extension = ad_extensions['value'][0]
return ad_extension['adExtension']['id']
def GetExperimentIdForCampaign(client, campaign_id):
"""Retreives the ID of an ACTIVE experiment for the specified campaign.
Args:
client: AdWordsClient client to obtain services from.
campaign_id: int ID of a CPC Campaign.
Returns:
int Experiment ID
"""
selector = {
'fields': ['Id'],
'predicates': [{
'field': 'CampaignId',
'operator': 'EQUALS',
'values': [campaign_id]
}, {
'field': 'Status',
'operator': 'EQUALS',
'values': ['ACTIVE']
}]
}
experiment_service = client.GetExperimentService(SERVER, VERSION, HTTP_PROXY)
page = experiment_service.get(selector)[0]
return page['entries'][0]['id']
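# A minimal sketch of the selector structure used by GetExperimentIdForCampaign()
# above, parameterised on the values it filters by; the helper name is
# hypothetical, the field and predicate names come from the example above.
def BuildExperimentSelector(campaign_id, status='ACTIVE'):
  return {
      'fields': ['Id'],
      'predicates': [{
          'field': 'CampaignId',
          'operator': 'EQUALS',
          'values': [campaign_id]
      }, {
          'field': 'Status',
          'operator': 'EQUALS',
          'values': [status]
      }]
  }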
|
nearlyfreeapps/python-googleadwords
|
tests/adspygoogle/adwords/util/__init__.py
|
Python
|
apache-2.0
| 9,270
|
[
"VisIt"
] |
8a2fab5896e609518d09cc6207d5edd3915d5472b37213526fb652ae827ea313
|
# -*- coding: utf-8 -*-
""" Sahana Eden IRS Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3IRSModel",
"S3IRSResponseModel",
"irs_rheader"]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3IRSModel(S3Model):
names = ["irs_icategory",
"irs_ireport",
"irs_ireport_id"]
def model(self):
db = current.db
T = current.T
request = current.request
s3 = current.response.s3
settings = current.deployment_settings
location_id = self.gis_location_id
datetime_represent = S3DateTime.datetime_represent
# Shortcuts
add_component = self.add_component
configure = self.configure
define_table = self.define_table
meta_fields = s3.meta_fields
set_method = self.set_method
super_link = self.super_link
# ---------------------------------------------------------------------
# List of Incident Categories
# The keys are based on the Canadian ems.incident hierarchy, with a few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed, as doing so would break synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
irs_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
"missingPerson.amberAlert": T("Child Abduction Emergency"), # http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
"missingPerson.silver": T("Missing Senior Citizen"), # http://en.wikipedia.org/wiki/Silver_Alert
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
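# The dotted keys above form a two-level hierarchy ("category.subtype", plus a
# few bare top-level entries such as "fire" or "crime"). A minimal standalone
# sketch of grouping the codes by their top-level category, e.g. for building
# a grouped dropdown; "opts" stands in for irs_incident_type_opts:
def _group_incident_types(opts):
    grouped = {}
    for code in opts:
        top_level = code.split(".", 1)[0]
        grouped.setdefault(top_level, []).append(code)
    return grouped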
# This Table defines which Categories are visible to end-users
tablename = "irs_icategory"
table = define_table(tablename,
Field("code", label = T("Category"),
requires = IS_IN_SET_LAZY(lambda: \
sort_dict_by_values(irs_incident_type_opts)),
represent = lambda opt: \
irs_incident_type_opts.get(opt, opt)),
*meta_fields())
configure(tablename,
onvalidation=self.irs_icategory_onvalidation,
list_fields=[ "code" ])
# ---------------------------------------------------------------------
# Reports
# This is a report of an Incident
#
# Incident Reports can be linked to Incidents through the event_incident_report table
#
# @ToDo: If not using the Events module, we could have a 'lead incident' to track duplicates?
#
# Porto codes
#irs_incident_type_opts = {
# 1100:T("Fire"),
# 6102:T("Hazmat"),
# 8201:T("Rescue")
#}
tablename = "irs_ireport"
table = define_table(tablename,
super_link("sit_id", "sit_situation"),
super_link("doc_id", "doc_entity"),
Field("name", label = T("Short Description"),
requires = IS_NOT_EMPTY()),
Field("message", "text", label = T("Message"),
represent = lambda text: \
s3_truncate(text, length=48, nice=True)),
Field("category", label = T("Category"),
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
requires = IS_NULL_OR(IS_IN_SET_LAZY(lambda: \
sort_dict_by_values(irs_incident_type_opts))),
# Use this instead if a simpler set of Options required
#requires = IS_NULL_OR(IS_IN_SET(irs_incident_type_opts)),
represent = lambda opt: \
irs_incident_type_opts.get(opt, opt)),
# Better to use a plain text field than to clutter the PR
Field("person",
readable = False,
writable = False,
label = T("Reporter Name"),
comment = (T("At/Visited Location (not virtual)"))),
Field("contact",
readable = False,
writable = False,
label = T("Contact Details")),
Field("datetime", "datetime",
default = request.utcnow,
label = T("Date/Time of Alert"),
widget = S3DateTimeWidget(future=0),
represent = lambda val: datetime_represent(val, utc=True),
requires = [IS_NOT_EMPTY(),
IS_UTC_DATETIME(allow_future=False)]),
Field("expiry", "datetime",
#readable = False,
#writable = False,
label = T("Expiry Date/Time"),
widget = S3DateTimeWidget(past=0),
represent = lambda val: datetime_represent(val, utc=True),
requires = IS_NULL_OR(IS_UTC_DATETIME())
),
location_id(),
# Very basic Impact Assessment
Field("affected", "integer",
label=T("Number of People Affected"),
represent = lambda val: val or T("unknown"),
),
Field("dead", "integer",
label=T("Number of People Dead"),
represent = lambda val: val or T("unknown"),
),
Field("injured", "integer",
label=T("Number of People Injured"),
represent = lambda val: val or T("unknown"),
),
# Probably too much to try & capture
#Field("missing", "integer",
# label=T("Number of People Missing")),
#Field("displaced", "integer",
# label=T("Number of People Displaced")),
Field("verified", "boolean", # Ushahidi-compatibility
# We don't want these visible in Create forms
# (we override in Update forms in controller)
readable = False,
writable = False,
label = T("Verified?"),
represent = lambda verified: \
(T("No"),
T("Yes"))[verified == True]),
# @ToDo: Move this to Events?
# Then display here as a Virtual Field
Field("dispatch", "datetime",
# We don't want these visible in Create forms
# (we override in Update forms in controller)
readable = False,
writable = False,
label = T("Date/Time of Dispatch"),
widget = S3DateTimeWidget(future=0),
requires = IS_EMPTY_OR(IS_UTC_DATETIME(allow_future=False))),
Field("closed", "boolean",
# We don't want these visible in Create forms
# (we override in Update forms in controller)
default = False,
readable = False,
writable = False,
label = T("Closed?"),
represent = lambda closed: \
(T("No"),
T("Yes"))[closed == True]),
s3.comments(),
*(s3.lx_fields() + meta_fields()))
# CRUD strings
ADD_INC_REPORT = T("Add Incident Report")
LIST_INC_REPORTS = T("List Incident Reports")
s3.crud_strings[tablename] = Storage(
title_create = ADD_INC_REPORT,
title_display = T("Incident Report Details"),
title_list = LIST_INC_REPORTS,
title_update = T("Edit Incident Report"),
title_upload = T("Import Incident Reports"),
title_search = T("Search Incident Reports"),
subtitle_create = T("Add New Incident Report"),
subtitle_list = T("Incident Reports"),
label_list_button = LIST_INC_REPORTS,
label_create_button = ADD_INC_REPORT,
label_delete_button = T("Delete Incident Report"),
msg_record_created = T("Incident Report added"),
msg_record_modified = T("Incident Report updated"),
msg_record_deleted = T("Incident Report deleted"),
msg_list_empty = T("No Incident Reports currently registered"))
ireport_search = S3Search(
advanced=(
S3SearchSimpleWidget(
name = "incident_search_simple",
label = T("Description"),
comment = T("You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents."),
field = ["name",
"message",
"comments",
]
),
S3SearchLocationHierarchyWidget(
name="incident_search_L1",
field="L1",
cols = 3,
),
S3SearchLocationHierarchyWidget(
name="incident_search_L2",
field="L2",
cols = 3,
),
S3SearchOptionsWidget(
name="incident_search_category",
field=["category"],
label = T("Category"),
cols = 3,
),
S3SearchMinMaxWidget(
name="incident_search_date",
method="range",
label=T("Date"),
field=["datetime"]
),
))
hierarchy = current.gis.get_location_hierarchy()
report_fields = [
"category",
"datetime",
(hierarchy["L1"], "L1"),
(hierarchy["L2"], "L2"),
]
# Resource Configuration
configure(tablename,
super_entity = ("sit_situation", "doc_entity"),
# Open tabs after creation
create_next = URL(args=["[id]", "update"]),
update_next = URL(args=["[id]", "update"]),
search_method = ireport_search,
report_filter=[
S3SearchLocationHierarchyWidget(
name="incident_search_L1",
field="L1",
cols = 3,
),
S3SearchLocationHierarchyWidget(
name="incident_search_L2",
field="L2",
cols = 3,
),
S3SearchOptionsWidget(
name="incident_search_category",
field=["category"],
label = T("Category"),
cols = 3,
),
S3SearchMinMaxWidget(
name="incident_search_date",
method="range",
label=T("Date"),
field=["datetime"]
),
],
report_rows = report_fields,
report_cols = report_fields,
report_fact = report_fields,
report_method=["count", "list"],
list_fields = ["id",
"name",
"category",
"datetime",
"location_id",
#"organisation_id",
"affected",
"dead",
"injured",
"verified",
"message",
])
# Components
# Tasks
add_component("project_task",
irs_ireport=Storage(link="project_task_ireport",
joinby="ireport_id",
key="task_id",
actuate="replace",
autocomplete="name",
autodelete=False))
# Vehicles
add_component("asset_asset",
irs_ireport=Storage(
link="irs_ireport_vehicle",
joinby="ireport_id",
key="asset_id",
name="vehicle",
# Dispatcher doesn't need to Add/Edit records, just Link
actuate="link",
autocomplete="name",
autodelete=False))
if settings.has_module("vehicle"):
link_table = "irs_ireport_vehicle_human_resource"
else:
link_table = "irs_ireport_human_resource"
add_component("hrm_human_resource",
irs_ireport=Storage(
link=link_table,
joinby="ireport_id",
key="human_resource_id",
# Dispatcher doesn't need to Add/Edit records, just Link
actuate="link",
autocomplete="name",
autodelete=False
)
)
ireport_id = S3ReusableField("ireport_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db,
"irs_ireport.id",
"%(name)s")),
represent = lambda id: \
(id and [db.irs_ireport[id].name] or [NONE])[0],
label = T("Incident"),
ondelete = "CASCADE")
# ---------------------------------------------------------------------
# Custom Methods
set_method("irs_ireport",
method="dispatch",
action=self.irs_dispatch)
set_method("irs_ireport",
method="timeline",
action=self.irs_timeline)
set_method("irs_ireport",
method="ushahidi",
action=self.irs_ushahidi_import)
if settings.has_module("fire"):
create_next = URL(args=["[id]", "human_resource"])
else:
create_next = URL(args=["[id]", "update"])
configure("irs_ireport",
create_onaccept=self.ireport_onaccept,
onvalidation=s3.lx_onvalidation,
create_next=create_next,
update_next=URL(args=["[id]", "update"])
)
# ---------------------------------------------------------------------
# Return model-global names to response.s3
#
return Storage(
irs_ireport_id = ireport_id,
irs_incident_type_opts = irs_incident_type_opts
)
# -------------------------------------------------------------------------
def defaults(self):
"""
Safe defaults for model-global names in case module is disabled
- used by events module
& legacy assess & impact modules
"""
ireport_id = S3ReusableField("ireport_id", "integer",
readable=False, writable=False)
return Storage(irs_ireport_id = ireport_id)
# -------------------------------------------------------------------------
@staticmethod
def irs_icategory_onvalidation(form):
"""
Incident Category Validation:
Prevent Duplicates
Done here rather than in .requires to maintain the dropdown.
"""
db = current.db
table = db.irs_icategory
category, error = IS_NOT_ONE_OF(db, "irs_icategory.code")(form.vars.code)
if error:
form.errors.code = error
return False
# -------------------------------------------------------------------------
@staticmethod
def ireport_onaccept(form):
"""
Assign the appropriate vehicle & on-shift team to the incident
@ToDo: Specialist teams
@ToDo: Make more generic (currently Porto-specific)
"""
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
if not settings.has_module("fire"):
return
vars = form.vars
ireport = vars.id
category = vars.category
if category == "1100":
# Fire
types = ["VUCI", "ABSC"]
elif category == "6102":
# Hazmat
types = ["VUCI", "VCOT"]
elif category == "8201":
# Rescue
types = ["VLCI", "ABSC"]
else:
types = ["VLCI"]
# 1st unassigned vehicle of the matching type
# @ToDo: Filter by Org/Base
# @ToDo: Filter by those which are under repair (asset_log)
table = s3db.irs_ireport_vehicle
stable = s3db.org_site
atable = s3db.asset_asset
vtable = s3db.vehicle_vehicle
ftable = s3db.fire_station
fvtable = s3db.fire_station_vehicle
for type in types:
query = (atable.type == s3db.asset_types["VEHICLE"]) & \
(vtable.type == type) & \
(vtable.asset_id == atable.id) & \
(atable.deleted == False) & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(atable.id == table.asset_id)
vehicle = db(query).select(atable.id,
left=left,
limitby=(0, 1)).first()
if vehicle:
current.manager.load("vehicle_vehicle")
vehicle = vehicle.id
query = (vtable.asset_id == vehicle) & \
(fvtable.vehicle_id == vtable.id) & \
(ftable.id == fvtable.station_id) & \
(stable.id == ftable.site_id)
site = db(query).select(stable.id,
limitby=(0, 1)).first()
if site:
site = site.id
table.insert(ireport_id=ireport,
asset_id=vehicle,
site_id=site)
if settings.has_module("hrm"):
# Assign 1st 5 human resources on-shift
# @ToDo: We shouldn't assign people to vehicles automatically - this is done as people are ready
# - instead we should simply assign people to the incident & then use a drag'n'drop interface to link people to vehicles
# @ToDo: Filter by Base
table = s3db.irs_ireport_vehicle_human_resource
htable = s3db.hrm_human_resource
on_shift = s3db.fire_staff_on_duty()
query = on_shift & \
((table.id == None) | \
(table.closed == True) | \
(table.deleted == True))
left = table.on(htable.id == table.human_resource_id)
people = db(query).select(htable.id,
left=left,
limitby=(0, 5))
# @ToDo: Find Ranking person to be incident commander
leader = people.first()
if leader:
leader = leader.id
for person in people:
if person.id == leader.id:
table.insert(ireport_id=ireport,
asset_id=vehicle,
human_resource_id=person.id,
incident_commander=True)
else:
table.insert(ireport_id=ireport,
asset_id=vehicle,
human_resource_id=person.id)
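# The vehicle lookup above uses a left join so that a vehicle qualifies when it
# has no assignment row at all, or only closed/deleted ones. A minimal
# standalone sketch of the same "first free resource" rule over hypothetical
# plain dicts (the real query also filters by asset & vehicle type):
def _first_free_vehicle(vehicle_ids, assignments):
    """assignments: dicts with "asset_id", "closed" and "deleted" flags."""
    open_assignments = set(a["asset_id"] for a in assignments
                           if not a["closed"] and not a["deleted"])
    for vehicle_id in vehicle_ids:
        if vehicle_id not in open_assignments:
            return vehicle_id
    return None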
# -------------------------------------------------------------------------
@staticmethod
def irs_dispatch(r, **attr):
"""
Send a Dispatch notice from an Incident Report
- this will be formatted as an OpenGeoSMS
"""
T = current.T
msg = current.msg
response = current.response
if r.representation == "html" and \
r.name == "ireport" and r.id and not r.component:
record = r.record
text = "%s %s:%s; %s" % (record.name,
T("Contact"),
record.contact,
record.message)
# Encode the message as an OpenGeoSMS
message = msg.prepare_opengeosms(record.location_id,
code="ST",
map="google",
text=text)
# URL to redirect to after message sent
url = URL(c="irs",
f="ireport",
args=r.id)
# Create the form
output = msg.compose(type="SMS",
recipient_type = "pr_person",
message = message,
url = url)
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Send Dispatch Update")
response.view = "msg/compose.html"
return output
else:
raise HTTP(501, BADMETHOD)
# -------------------------------------------------------------------------
@staticmethod
def irs_timeline(r, **attr):
"""
Display the Incidents on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
@ToDo: Play button
http://www.simile-widgets.org/wiki/Timeline_Moving_the_Timeline_via_Javascript
"""
if r.representation == "html" and r.name == "ireport":
import gluon.contrib.simplejson as json
T = current.T
db = current.db
s3db = current.s3db
request = current.request
response = current.response
session = current.session
s3 = response.s3
now = request.utcnow
itable = s3db.doc_image
dtable = s3db.doc_document
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % request.application)
# Add our control script
if session.s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % request.application)
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
r.resource.load(limit=2000)
rows = r.resource._rows
data = {'dateTimeFormat': 'iso8601',
'events': []
}
tl_start = now
tl_end = now
events = []
for row in rows:
# Dates
start = row.datetime or ""
if start:
if start < tl_start:
tl_start = start
if start > tl_end:
tl_end = start
start = start.isoformat()
end = row.expiry or ""
if end:
if end > tl_end:
tl_end = end
end = end.isoformat()
# Image
# Just grab the first one for now
query = (itable.deleted == False) & \
(itable.doc_id == row.doc_id)
image = db(query).select(itable.url,
limitby=(0, 1)).first()
if image:
image = image.url or ""
# URL
link = URL(args=[row.id])
events.append({'start': start,
'end': end,
'title': row.name,
'caption': row.message or "",
'description': row.message or "",
'image': image or "",
'link': link or ""
# @ToDo: Colour based on Category (More generically: Resource or Resource Type)
#'color' : 'blue'
})
data["events"] = events
data = json.dumps(data)
code = "".join(("""
S3.timeline.data = """, data, """;
S3.timeline.tl_start = '""", tl_start.isoformat(), """';
S3.timeline.tl_end = '""", tl_end.isoformat(), """';
S3.timeline.now = '""", now.isoformat(), """';
"""))
# Control our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id="s3timeline", _style="height: 400px; border: 1px solid #aaa; font-family: Trebuchet MS, sans-serif; font-size: 85%;")
output = dict(item = item)
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Incident Timeline")
response.view = "timeline.html"
return output
else:
raise HTTP(501, BADMETHOD)
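# The timeline method above assembles a Simile Timeline payload: ISO-8601
# start/end strings per event plus the overall tl_start/tl_end window. A
# minimal standalone sketch of that payload shape, over hypothetical records
# with "name", "start" and optional "end" datetime values:
def _timeline_payload_sketch(records):
    import datetime
    import json
    now = datetime.datetime.utcnow()
    tl_start = tl_end = now
    events = []
    for record in records:
        start = record["start"]
        end = record.get("end")
        tl_start = min(tl_start, start)
        tl_end = max(tl_end, end or start)
        events.append({"start": start.isoformat(),
                       "end": end.isoformat() if end else "",
                       "title": record["name"]})
    return json.dumps({"dateTimeFormat": "iso8601", "events": events})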
# -------------------------------------------------------------------------
@staticmethod
def irs_ushahidi_import(r, **attr):
"""
Import Incident Reports from Ushahidi
@ToDo: Deployment setting for Ushahidi instance URL
"""
import os
T = current.T
auth = current.auth
request = current.request
response = current.response
session = current.session
# Method is only available to Admins
system_roles = session.s3.system_roles
ADMIN = system_roles.ADMIN
if not auth.s3_has_role(ADMIN):
auth.permission.fail()
if r.representation == "html" and \
r.name == "ireport" and not r.component and not r.id:
url = r.get_vars.get("url", "http://")
title = T("Incident Reports")
subtitle = T("Import from Ushahidi Instance")
form = FORM(TABLE(TR(
TH("URL: "),
INPUT(_type="text", _name="url", _size="100", _value=url,
requires=[IS_URL(), IS_NOT_EMPTY()]),
TH(DIV(SPAN("*", _class="req", _style="padding-right: 5px;")))),
TR(TD("Ignore Errors?: "),
TD(INPUT(_type="checkbox", _name="ignore_errors", _id="ignore_errors"))),
TR("", INPUT(_type="submit", _value=T("Import")))))
label_list_btn = S3CRUD.crud_string(r.tablename, "title_list")
list_btn = A(label_list_btn,
_href=r.url(method="", vars=None),
_class="action-btn")
rheader = DIV(P("%s: http://wiki.ushahidi.com/doku.php?id=ushahidi_api" % T("API is documented here")),
P("%s URL: http://ushahidi.my.domain/api?task=incidents&by=all&resp=xml&limit=1000" % T("Example")))
output = dict(title=title,
form=form,
subtitle=subtitle,
list_btn=list_btn,
rheader=rheader)
if form.accepts(request.vars, session):
# "Exploit" the de-duplicator hook to count import items
import_count = [0]
def count_items(job, import_count = import_count):
if job.tablename == "irs_ireport":
import_count[0] += 1
self.configure("irs_report", deduplicate=count_items)
ireports = r.resource
ushahidi = form.vars.url
ignore_errors = form.vars.get("ignore_errors", None)
stylesheet = os.path.join(request.folder, "static", "formats",
"ushahidi", "import.xsl")
if os.path.exists(stylesheet) and ushahidi:
try:
success = ireports.import_xml(ushahidi,
stylesheet=stylesheet,
ignore_errors=ignore_errors)
except:
import sys
e = sys.exc_info()[1]
response.error = e
else:
if success:
count = import_count[0]
if count:
response.flash = "%s %s" % (import_count[0],
T("reports successfully imported."))
else:
response.flash = T("No reports available.")
else:
response.error = ireports.error
response.view = "create.html"
return output
else:
raise HTTP(501, BADMETHOD)
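# The Ushahidi import above counts imported reports by "exploiting" the
# de-duplication hook: the callback closes over a one-element list, so the
# count survives across calls. A minimal standalone sketch of that mutable
# counter idiom (hypothetical job objects with a tablename attribute):
def _make_import_counter(tablename):
    count = [0]
    def hook(job, count=count):
        if job.tablename == tablename:
            count[0] += 1
    return hook, count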
# =============================================================================
class S3IRSResponseModel(S3Model):
"""
Tables used when responding to Incident Reports
- with HRMs &/or Vehicles
Currently this has code specific to Porto Firefighters
@ToDo: Move these to Events module?
- the response shouldn't live within the reporting system?
"""
names = ["irs_ireport_human_resource",
"irs_ireport_vehicle",
"irs_ireport_vehicle_human_resource"]
def model(self):
db = current.db
T = current.T
request = current.request
s3 = current.response.s3
settings = current.deployment_settings
human_resource_id = self.hrm_human_resource_id
location_id = self.gis_location_id
ireport_id = self.irs_ireport_id
# ---------------------------------------------------------------------
# Staff assigned to an Incident
#
tablename = "irs_ireport_human_resource"
table = self.define_table(tablename,
ireport_id(),
# Simple dropdown is faster for a small team
human_resource_id(widget=None),
Field("incident_commander", "boolean",
default = False,
label = T("Incident Commander"),
represent = lambda incident_commander: \
(T("No"),
T("Yes"))[incident_commander == True]),
*s3.meta_fields())
if not current.deployment_settings.has_module("vehicle"):
return None
# ---------------------------------------------------------------------
# Vehicles assigned to an Incident
#
asset_id = self.asset_asset_id
tablename = "irs_ireport_vehicle"
table = self.define_table(tablename,
ireport_id(),
asset_id(
label = T("Vehicle"),
requires=self.irs_vehicle_requires
),
Field("datetime", "datetime",
label=T("Dispatch Time"),
widget = S3DateTimeWidget(future=0),
requires = IS_EMPTY_OR(IS_UTC_DATETIME(allow_future=False)),
default = request.utcnow),
self.super_link("site_id", "org_site"),
location_id(label=T("Destination")),
Field("closed",
# @ToDo: Close all assignments when Incident closed
readable=False,
writable=False),
s3.comments(),
*s3.meta_fields())
# Field options
table.site_id.label = T("Fire Station")
table.site_id.readable = True
# Populated from fire_station_vehicle
#table.site_id.writable = True
table.virtualfields.append(irs_ireport_vehicle_virtual_fields())
# ---------------------------------------------------------------------
# Which Staff are assigned to which Vehicle?
#
hr_represent = self.hrm_hr_represent
tablename = "irs_ireport_vehicle_human_resource"
table = self.define_table(tablename,
ireport_id(),
# Simple dropdown is faster for a small team
human_resource_id(represent=hr_represent,
requires = IS_ONE_OF(db,
"hrm_human_resource.id",
hr_represent,
#orderby="pr_person.first_name"
),
widget=None),
asset_id(label = T("Vehicle")),
Field("closed",
# @ToDo: Close all assignments when Incident closed
readable=False,
writable=False),
*s3.meta_fields())
# ---------------------------------------------------------------------
# Return model-global names to response.s3
#
return Storage(
)
# -------------------------------------------------------------------------
@staticmethod
def irs_vehicle_requires():
"""
Populate the dropdown widget for responding to an Incident Report
based on those vehicles which aren't already on-call
"""
db = current.db
s3db = current.s3db
s3 = response = current.response.s3
# Vehicles are a type of Asset
table = s3db.asset_asset
ltable = s3db.irs_ireport_vehicle
asset_represent = s3db.asset_asset_id.represent
# Filter to Vehicles which aren't already on a call
# @ToDo: Filter by Org/Base
# @ToDo: Filter out those which are under repair
query = (table.type == s3db.asset_types["VEHICLE"]) & \
(table.deleted == False) & \
((ltable.id == None) | \
(ltable.closed == True) | \
(ltable.deleted == True))
left = ltable.on(table.id == ltable.asset_id)
requires = IS_NULL_OR(IS_ONE_OF(db(query),
"asset_asset.id",
asset_represent,
left=left,
sort=True))
return requires
# =============================================================================
def irs_rheader(r, tabs=[]):
""" Resource component page header """
if r.representation == "html":
if r.record is None:
# List or Create form: rheader makes no sense here
return None
T = current.T
s3db = current.s3db
#s3 = current.response.s3
settings = current.deployment_settings
tabs = [(T("Report Details"), None),
(T("Photos"), "image"),
(T("Documents"), "document"),
(T("Vehicles"), "vehicle"),
(T("Staff"), "human_resource"),
(T("Tasks"), "task"),
]
if settings.has_module("msg"):
tabs.append((T("Dispatch"), "dispatch"))
rheader_tabs = s3_rheader_tabs(r, tabs)
if r.name == "ireport":
report = r.record
table = r.table
datetime = table.datetime.represent(report.datetime)
expiry = table.datetime.represent(report.expiry)
location = table.location_id.represent(report.location_id)
category = table.category.represent(report.category)
contact = ""
if report.person:
if report.contact:
contact = "%s (%s)" % (report.person, report.contact)
else:
contact = report.person
elif report.contact:
contact = report.contact
if contact:
contact = DIV(TH("%s: " % T("Contact")), TD(contact))
#create_request = A(T("Create Request"),
# _class="action-btn colorbox",
# _href=URL(c="req", f="req",
# args="create",
# vars={"format":"popup",
# "caller":"irs_ireport"}),
# _title=T("Add Request"))
#create_task = A(T("Create Task"),
# _class="action-btn colorbox",
# _href=URL(c="project", f="task",
# args="create",
# vars={"format":"popup",
# "caller":"irs_ireport"}),
# _title=T("Add Task"))
rheader = DIV(TABLE(
TR(
TH("%s: " % table.name.label), report.name,
TH("%s: " % table.datetime.label), datetime,
),
TR(
TH("%s: " % table.category.label), category,
TH("%s: " % table.expiry.label), expiry,
),
TR(
TH("%s: " % table.location_id.label), location,
contact,
),
TR(
TH("%s: " % table.message.label), TD(report.message or "",
_colspan=3),
)
),
#DIV(P(), create_request, " ", create_task, P()),
rheader_tabs)
return rheader
else:
return None
# =============================================================================
class irs_ireport_vehicle_virtual_fields:
"""
"""
extra_fields = ["datetime"]
def minutes(self):
request = current.request
try:
delta = request.utcnow - self.irs_ireport_vehicle.datetime
except:
return 0
return delta.days * 1440 + int(delta.seconds / 60)
# END =========================================================================
|
flavour/iscram
|
modules/eden/irs.py
|
Python
|
mit
| 52,100
|
[
"VisIt"
] |
78a0d4ff291d8339003ec8d12f98dcdca5d555597f5d4b3b0ee81e812bc17ada
|
import logging
import sys
import random
import math
import matplotlib
import numpy as np
def wmean(vals, weights=None):
return np.average(vals, weights=weights)
def wstd(vals, weights=None):
"""
compute a weighted standard deviation
"""
mean = wmean(vals, weights)
vals = np.array(vals).astype(float)
weights = np.array(weights).astype(float)
weights /= weights.sum()
top = (weights * ((vals - mean)**2)).sum()
bot = weights.sum()
return math.sqrt(top / bot)
def prob_no_error(pop, errprob, npts):
"""
computes the probability that a random sample of `npts` points drawn from a
population with error probability `errprob` will not contain any error points
@param pop population size
@param errprob probability of an error point (e.g., user specified lower bound)
@param npts sample size
"""
def choose(n, k):
n,k = int(n), int(k)
v = 0.
if k > n/2:
v += sum(map(math.log, xrange(k,n+1)))
v -= sum(map(math.log, xrange(1,n-k+1)))
else:
v += sum(map(math.log, xrange(n-k,n+1)))
v -= sum(map(math.log, xrange(1,k+1)))
return v
# c(pop*(1-errprob), npts) / c(pop, npts)
c1 = choose(pop*(1-errprob), npts)
c2 = choose(pop, npts)
return math.exp(c1 - c2)
def best_sample_size(pop, errprob, confidence=0.95):
"""
given a population and an error probability, computes the minimum sample size `s` such
that, with the requested confidence (default 95%), a sample of size `s` will contain at least one error point
"""
sample_size, best_prob = None, None
threshold = max(0, 1. - confidence)
mins, maxs = 1, pop
while maxs - mins > 20:
size = max(1, int((maxs + mins) / 2.))
#print size, '\t', prob_no_error(pop, errprob, size)
good = prob_no_error(pop, errprob, size) < threshold
if good:
# either this is the best, or we should update the ranges and
# look again
if prob_no_error(pop, errprob, size-1) < threshold:
maxs = size
continue
else:
return size
else:
mins = size+1
for size in xrange(mins, maxs+1, 1):
if prob_no_error(pop, errprob, size) < threshold:
return size
return pop
def sample_size(moe=0.1, pop=10, zval=2.58):
"""
sample size based on estimator closed form solutions
@param moe margin of error
@param pop population size (size of partition)
@param zval confidence interval (default: 99%) 95% is 1.96
"""
ss = ((zval**2) * 0.25) / (moe ** 2)
if pop:
ss = ss / (1 + (ss-1)/pop)
return min(ss, pop)
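# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module).  The inputs are arbitrary example numbers, chosen only to show how
# the two sample-size helpers above are meant to be called.
if __name__ == "__main__":
    # smallest sample that, with 95% confidence, contains at least one error
    # point when 1% of a population of 10,000 points are errors
    print best_sample_size(pop=10000, errprob=0.01, confidence=0.95)
    # closed-form sample size for a 10% margin of error over a partition of
    # 500 points at ~99% confidence (zval=2.58)
    print sample_size(moe=0.1, pop=500, zval=2.58)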
|
sirrice/scorpion
|
scorpion/util/prob.py
|
Python
|
mit
| 2,501
|
[
"MOE"
] |
375d5af58fcaa9065c38cb6f0171dfd5925c8c9f7f7e50ccbb0a24ae335017a1
|
from __future__ import print_function
# Copyright (C) 2012, Jesper Friis
# (see accompanying license files for ASE).
"""
Determines space group of an atoms object using the FINDSYM program
from the ISOTROPY (http://stokes.byu.edu/iso/isotropy.html) software
package by H. T. Stokes and D. M. Hatch, Brigham Young University,
USA.
In order to use this module, you have to download the ISOTROPY package
from http://stokes.byu.edu/iso/isotropy.html and set the environment
variable ISODATA to the path of the directory containing findsym
and data_space.txt (NB: the path should end with a slash (/)).
Example
-------
>>> from ase.lattice.spacegroup import crystal
>>> from ase.utils.geometry import cut
# Start with simple fcc Al
>>> al = crystal('Al', [(0,0,0)], spacegroup=225, cellpar=4.05)
>>> d = findsym(al)
>>> d['spacegroup']
225
# No problem with a more complex structure...
>>> skutterudite = crystal(('Co', 'Sb'),
... basis=[(0.25,0.25,0.25), (0.0, 0.335, 0.158)],
... spacegroup=204,
... cellpar=9.04)
>>> d = findsym(skutterudite)
>>> d['spacegroup']
204
# ... or a non-conventional cut
slab = cut(skutterudite, a=(1, 1, 0), b=(0, 2, 0), c=(0, 0, 1))
d = findsym(slab)
>>> d['spacegroup']
204
"""
import os
import subprocess
import numpy as np
import ase
__all__ = ['findsym', 'unique']
def make_input(atoms, tol=1e-3, centering='P', types=None):
"""Returns input to findsym. See findsym() for a description of
the arguments."""
if types is None:
types = atoms.numbers
s = []
s.append(atoms.get_chemical_formula())
s.append('%g tolerance' % tol)
s.append('2 form of lattice parameters: to be entered as lengths '
'and angles')
s.append('%g %g %g %g %g %g a,b,c,alpha,beta,gamma' %
tuple(ase.lattice.spacegroup.cell.cell_to_cellpar(atoms.cell)))
s.append('2 form of vectors defining unit cell') # ??
s.append('%s centering (P=unknown)' % centering)
s.append('%d number of atoms in primitive unit cell' % len(atoms))
s.append(' '.join(str(n) for n in types) + ' type of each atom')
for p in atoms.get_scaled_positions():
s.append('%10.5f %10.5f %10.5f' % tuple(p))
return '\n'.join(s)
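# For orientation (added comment, not part of the original module): for the
# fcc Al example in the module docstring, make_input() returns text roughly
# like the following, which is the plain-text input format FINDSYM expects
# (exact ordering of the position lines may differ):
#
#   Al4
#   0.001 tolerance
#   2 form of lattice parameters: to be entered as lengths and angles
#   4.05 4.05 4.05 90 90 90 a,b,c,alpha,beta,gamma
#   2 form of vectors defining unit cell
#   P centering (P=unknown)
#   4 number of atoms in primitive unit cell
#   13 13 13 13 type of each atom
#      0.00000    0.00000    0.00000
#      0.00000    0.50000    0.50000
#      0.50000    0.00000    0.50000
#      0.50000    0.50000    0.00000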
def run(atoms, tol=1e-3, centering='P', types=None, isodata_dir=None):
"""Runs FINDSYM and returns its standard output."""
if isodata_dir is None:
isodata_dir = os.getenv('ISODATA')
if isodata_dir is None:
isodata_dir = '.'
isodata_dir = os.path.normpath(isodata_dir)
findsym = os.path.join(isodata_dir, 'findsym')
data_space = os.path.join(isodata_dir, 'data_space.txt')
for path in findsym, data_space:
if not os.path.exists(path):
raise IOError('no such file: %s. Have you set the ISODATA '
'environment variable to the directory containing '
'findsym and data_space.txt?' % path)
env = os.environ.copy()
env['ISODATA'] = isodata_dir + os.sep
p = subprocess.Popen([findsym], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, env=env)
stdout = p.communicate(make_input(atoms, tol, centering, types))[0]
# if os.path.exists('findsym.log'):
# os.remove('findsym.log')
return stdout
def parse(output):
"""Parse output from FINDSYM (Version 3.2.3, August 2007) and
return a dict. See docstring for findsym() for a description of
the tokens."""
d = {}
lines = output.splitlines()
def search_for_line(line_str):
check_line = [i for i, line in enumerate(lines)
if line.startswith(line_str)]
return check_line
i_cellpar = search_for_line('Lattice parameters')[0]
d['cellpar'] = np.array([float(v) for v in
lines[i_cellpar + 1].split()])
i_natoms = search_for_line('Number of atoms in unit cell')[0]
natoms = int(lines[i_natoms + 1].split()[0])
# Determine number of atoms from atom types, since the number of
# atoms is written with only 3 digits, which crashes the parser
# for more than 999 atoms
i_spg = search_for_line('Space Group')[0]
tokens = lines[i_spg].split()
d['spacegroup'] = int(tokens[2])
# d['symbol_nonconventional'] = tokens[3]
d['symbol'] = tokens[4]
i_origin = search_for_line('Origin at')[0]
d['origin'] = np.array([float(v) for v in lines[i_origin].split()[2:]])
i_abc = search_for_line('Vectors a,b,c')[0]
d['abc'] = np.array([[float(v) for v in line.split()]
for line in lines[i_abc + 1:i_abc + 4]]).T
i_wyck_start = search_for_line('Wyckoff position')
d['wyckoff'] = []
d['tags'] = -np.ones(natoms, dtype=int)
i_wyck_stop = i_wyck_start[1:]
i_wyck_stop += [i_wyck_start[0] + natoms + 3]
# assign the tags to the individual atoms
for tag, (i_start, i_stop) in enumerate(zip(i_wyck_start,
i_wyck_stop)):
tokens = lines[i_start].split()
d['wyckoff'].append(tokens[2].rstrip(','))
i_tag = [int(line.split()[0]) - 1
for line in lines[i_start + 1:i_stop]]
d['tags'][i_tag] = tag
return d
def findsym(atoms, tol=1e-3, centering='P', types=None, isodata_dir=None):
"""Returns a dict describing the symmetry of *atoms*.
Arguments
---------
atoms: Atoms instance
Atoms instance to find space group of.
tol: float
Accuracy to which dimensions of the unit cell and positions of
atoms are known. Units in Angstrom.
centering: 'P' | 'I' | 'F' | 'A' | 'B' | 'C' | 'R'
Known centering: P (no known centering), I (body-centered), F
(face-centered), A,B,C (base centered), R (rhombohedral
centered with coordinates of centered points at (2/3,1/3,1/3)
and (1/3,2/3,2/3)).
types: None | sequence of integers
Sequence of arbitrary positive integers identifying different
atomic sites, so that a symmetry operation that takes one atom
into another with different type would be forbidden.
Returned dict items
-------------------
abc: 3x3 float array
The vectors a, b, c defining the cell in scaled coordinates.
cellpar: 6 floats
Cell parameters a, b, c, alpha, beta, gamma with lengths in
Angstrom and angles in degree.
origin: 3 floats
Origin of the space group with respect to the origin in the
input data. Coordinates are dimensionless, given in terms of
the lattice parameters of the unit cell in the input.
spacegroup: int
Space group number from the International Tables of
Crystallography.
symbol: str
Hermann-Mauguin symbol (no spaces).
tags: int array
Array of site numbers for each atom. Only atoms within the
first conventional unit cell are tagged, the rest have -1 as
tag.
wyckoff: list
List of wyckoff symbols for each site.
"""
output = run(atoms, tol, centering, types, isodata_dir)
d = parse(output)
return d
def unique(atoms, tol=1e-3, centering='P', types=None, isodata_dir=None):
"""Returns an Atoms object containing only one atom from each unique site.
"""
d = findsym(atoms, tol=tol, centering=centering, types=types,
isodata_dir=isodata_dir)
mask = np.concatenate(([True], np.diff(d['tags']) != 0)) * (d['tags'] >= 0)
at = atoms[mask]
a, b, c, alpha, beta, gamma = d['cellpar']
A, B, C = d['abc']
A *= a
B *= b
C *= c
from numpy.linalg import norm
from numpy import cos, pi
assert abs(np.dot(A, B) -
(norm(A) * norm(B) * cos(gamma * pi / 180.))) < 1e-5
assert abs(np.dot(A, C) -
(norm(A) * norm(C) * cos(beta * pi / 180.))) < 1e-5
assert abs(np.dot(B, C) -
(norm(B) * norm(C) * cos(alpha * pi / 180.))) < 1e-5
at.cell = np.array([A, B, C])
for k in 'origin', 'spacegroup', 'wyckoff':
at.info[k] = d[k]
at.info['unit_cell'] = 'unique'
scaled = at.get_scaled_positions()
at.set_scaled_positions(scaled)
return at
if __name__ == '__main__':
import doctest
print('doctest:', doctest.testmod())
|
suttond/MODOI
|
ase/lattice/spacegroup/findsym.py
|
Python
|
lgpl-3.0
| 8,346
|
[
"ASE",
"CRYSTAL"
] |
12220a9bc72f39cfd609fd844cd2c4e0906c822c960e6c73abd00cf0d46303f9
|
# $Id$
#
# Copyright (C) 2006 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from contextlib import closing
import unittest
from rdkit.six import StringIO
from rdkit.Chem.FeatMaps import FeatMaps, FeatMapParser
def feq(n1, n2, tol=1e-5):
return abs(n1 - n2) <= tol
class TestCase(unittest.TestCase):
data = """
ScoreMode=Best
DirScoreMode=DotFullRange
BeginParams
family=Aromatic radius=2.5 width=1.0 profile=Triangle
family=Acceptor radius=1.5
EndParams
# optional
BeginPoints
family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1)
family=Acceptor pos=(1.0,1.0,2.0) weight=1.25
EndPoints
"""
def test1Basics(self):
p = FeatMapParser.FeatMapParser()
p.SetData(self.data)
fm = p.Parse()
self.assertTrue(fm.scoreMode == FeatMaps.FeatMapScoreMode.Best)
self.assertTrue(fm.dirScoreMode == FeatMaps.FeatDirScoreMode.DotFullRange)
self.assertTrue(fm.GetNumFeatures() == 3)
feats = fm.GetFeatures()
self.assertTrue(feq(feats[0].weight, 1.25))
self.assertTrue(feq(feats[1].weight, 2.0))
self.assertTrue(feq(feats[2].weight, 1.25))
self.assertTrue(len(feats[0].featDirs) == 1)
self.assertTrue(len(feats[1].featDirs) == 2)
self.assertTrue(len(feats[2].featDirs) == 0)
fams = [x.GetFamily() for x in feats]
self.assertTrue(fams == ['Acceptor', 'Aromatic', 'Acceptor'])
def test_FeatMapParser(self):
# We can use a string
p = FeatMapParser.FeatMapParser(data=self.data)
fm = p.Parse()
self.assertEqual(fm.GetNumFeatures(), 3)
self.assertEqual([x.GetFamily() for x in fm.GetFeatures()],
['Acceptor', 'Aromatic', 'Acceptor'])
# We can use a list of strings
p = FeatMapParser.FeatMapParser(data=self.data.split('\n'))
fm = p.Parse()
self.assertEqual(fm.GetNumFeatures(), 3)
self.assertEqual([x.GetFamily() for x in fm.GetFeatures()],
['Acceptor', 'Aromatic', 'Acceptor'])
# and a stream
with closing(StringIO(self.data)) as file:
p = FeatMapParser.FeatMapParser(file=file)
fm = p.Parse()
self.assertEqual(fm.GetNumFeatures(), 3)
self.assertEqual([x.GetFamily() for x in fm.GetFeatures()],
['Acceptor', 'Aromatic', 'Acceptor'])
def test_ParseErrors(self):
# Typos in scoreMode or dirscoreMode section
data = "scoreMode = typo\nbeginParams\nfamily=Acceptor radius=1.5\nEndParams"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
data = "dirscoremode = typo\nbeginParams\nfamily=Acceptor radius=1.5\nEndParams"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
data = "typo = All\nbeginParams\nfamily=Acceptor radius=1.5\nEndParams"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
# Typos in paramBlock
data = "beginTypo\nfamily=Acceptor radius=1.5\nEndParams"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
data = "beginParams\nfamily=Acceptor radius=1.5\nEndTypo"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
data = "beginParams\ntypo=Acceptor radius=1.5\nEndParams"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
data = "beginParams\nprofile=Typo\nEndParams"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
# Typos in points block
data = "BeginPoints\npos=(1.0, 0.0, 5.0, 4.0)\nEndPoints"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(ValueError, p.Parse)
data = "BeginPoints\npos=(1.0, 0.0, 5.0) typo=Acceptor\nEndPoints"
p = FeatMapParser.FeatMapParser(data=data)
self.assertRaises(FeatMapParser.FeatMapParseError, p.Parse)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/Chem/FeatMaps/UnitTestFeatMapParser.py
|
Python
|
bsd-3-clause
| 4,301
|
[
"RDKit"
] |
a45c2505c1f2b1d77353d8a1c0439057c9079e5c34745cf354c900d76cab3e40
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functionality for selecting a host from pre-defined list."""
import ast
from collections import namedtuple
from functools import lru_cache
from io import BytesIO
import json
import random
from time import sleep
import token
from tokenize import tokenize
from cylc.flow import LOG
from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
from cylc.flow.exceptions import HostSelectException
from cylc.flow.hostuserutil import get_fqdn_by_host, is_remote_host
from cylc.flow.remote import _remote_cylc_cmd, run_cmd
from cylc.flow.terminal import parse_dirty_json
def select_workflow_host(cached=True):
"""Return a host as specified in `[workflow hosts]`.
* Condemned hosts are filtered out.
* Filters out hosts excluded by ranking (if defined).
* Ranks by ranking (if defined).
Args:
cached (bool):
Use a cached version of the global configuration if True
else reload from the filesystem.
Returns:
tuple - See `select_host` for details.
Raises:
HostSelectException:
See `select_host` for details.
socket.gaierror:
See `select_host` for details.
"""
# get the global config, if cached = False a new config instance will
# be returned with the up-to-date configuration.
global_config = glbl_cfg(cached=cached)
return select_host(
# list of workflow hosts
global_config.get([
'scheduler', 'run hosts', 'available'
]) or ['localhost'],
# rankings to apply
ranking_string=global_config.get([
'scheduler', 'run hosts', 'ranking'
]),
# list of condemned hosts
blacklist=global_config.get(
['scheduler', 'run hosts', 'condemned']
),
blacklist_name='condemned host'
)
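# Illustrative sketch (added for clarity; not part of cylc itself) of the
# global configuration section implied by the keys read above.  Only the key
# names come from the code; the surrounding file syntax is an assumption:
#
#   [scheduler]
#       [[run hosts]]
#           available = hostA, hostB, hostC
#           condemned = hostC
#           ranking = """
#               # filter out busy hosts, then rank by load average
#               cpu_percent() < 70
#               getloadavg()[0]
#           """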
def select_host(
hosts,
ranking_string=None,
blacklist=None,
blacklist_name=None
):
"""Select a host from the provided list.
If no ranking is provided (in `ranking_string`) then random selection
is used.
Args:
hosts (list):
List of host names to choose from.
NOTE: Host names must be identifiable from the host where the
call is executed.
ranking_string (str):
A multiline string containing Python expressions to filter
hosts by e.g::
# only consider hosts with less than 70% cpu usage
# and a server load of less than 5
cpu_percent() < 70
getloadavg()[0] < 5
And or Python statements to rank hosts by e.g::
# rank by used cpu, then by load average as a tie-break
# (lower scores are better)
cpu_percent()
getloadavg()
Comments are allowed using `#` but not inline comments.
blacklist (list):
List of host names to filter out.
Can be short host names (do not have to be fqdn values)
blacklist_name (str):
The reason for blacklisting these hosts
(used for exceptions).
Raises:
HostSelectException:
In the event that no hosts are available / meet the specified
criterion.
socket.gaierror:
This may be raised in the event of unknown host names
for some installations or not for others.
Returns:
tuple - (hostname, fqdn) the chosen host
hostname (str):
The hostname as provided to this function.
fqdn (str):
The fully qualified domain name of this host.
"""
# standardise host names - remove duplicate items
hostname_map = { # note dictionary keys filter out duplicates
get_fqdn_by_host(host): host
for host in hosts
}
hosts = list(hostname_map)
if blacklist:
blacklist = list(set(map(get_fqdn_by_host, blacklist)))
# dict of conditions and whether they have been met (for error reporting)
data = {
host: {}
for host in hosts
}
# filter out `filter_hosts` if provided
if blacklist:
hosts, data = _filter_by_hostname(
hosts,
blacklist,
blacklist_name,
data=data
)
if not hosts:
# no hosts provided / left after filtering
raise HostSelectException(data)
rankings = []
if ranking_string:
# parse rankings
rankings = list(_get_rankings(ranking_string))
if not rankings:
# no metrics or ranking required, pick host at random
hosts = [random.choice(list(hosts))] # nosec
if not rankings and len(hosts) == 1:
return hostname_map[hosts[0]], hosts[0]
# filter and sort by rankings
metrics = list({x for x, _ in rankings}) # required metrics
results, data = _get_metrics( # get data from each host
hosts, metrics, data)
hosts = list(results) # some hosts might not be contactable
# stop here if we don't need to proceed
if not hosts:
# no hosts provided / left after filtering
raise HostSelectException(data)
if not rankings and len(hosts) == 1:
return hostname_map[hosts[0]], hosts[0]
hosts, data = _filter_by_ranking(
# filter by rankings, sort by ranking
hosts,
rankings,
results,
data=data
)
if not hosts:
# no hosts provided / left after filtering
raise HostSelectException(data)
return hostname_map[hosts[0]], hosts[0]
def _filter_by_hostname(
hosts,
blacklist,
blacklist_name=None,
data=None
):
"""Filter out any hosts present in `blacklist`.
Args:
hosts (list):
List of host fqdns.
blacklist (list):
List of blacklisted host fqdns.
blacklist_name (str):
The reason for blacklisting these hosts
(used for exceptions).
data (dict):
Dict of the form {host: {}}
(used for exceptions).
Examples
>>> _filter_by_hostname(['a'], [], 'meh')
(['a'], {'a': {'blacklisted(meh)': False}})
>>> _filter_by_hostname(['a', 'b'], ['a'])
(['b'], {'a': {'blacklisted': True}, 'b': {'blacklisted': False}})
"""
if not data:
data = {host: {} for host in hosts}
for host in list(hosts):
key = 'blacklisted'
if blacklist_name:
key = f'{key}({blacklist_name})'
if host in blacklist:
hosts.remove(host)
data[host][key] = True
else:
data[host][key] = False
return hosts, data
def _filter_by_ranking(hosts, rankings, results, data=None):
"""Filter and rank by the provided rankings.
Args:
hosts (list):
List of host fqdns.
rankings (list):
Thresholds which must be met.
List of rankings as returned by `get_rankings`.
results (dict):
Nested dictionary as returned by `get_metrics` of the form:
`{host: {value: result, ...}, ...}`.
data (dict):
Dict of the form {host: {}}
(used for exceptions).
Examples:
# ranking
>>> _filter_by_ranking(
... ['a', 'b'],
... [('X', 'RESULT')],
... {'a': {'X': 123}, 'b': {'X': 234}}
... )
(['a', 'b'], {'a': {}, 'b': {}})
# rankings
>>> _filter_by_ranking(
... ['a', 'b'],
... [('X', 'RESULT < 200')],
... {'a': {'X': 123}, 'b': {'X': 234}}
... )
(['a'], {'a': {'X() < 200': True}, 'b': {'X() < 200': False}})
# no matching hosts
>>> _filter_by_ranking(
... ['a'],
... [('X', 'RESULT > 1')],
... {'a': {'X': 0}}
... )
([], {'a': {'X() > 1': False}})
"""
if not data:
data = {host: {} for host in hosts}
good = []
for host in hosts:
host_rankings = {}
host_rank = []
for key, expression in rankings:
item = _reformat_expr(key, expression)
result = _simple_eval(expression, RESULT=results[host][key])
if isinstance(result, bool):
host_rankings[item] = result
data[host][item] = result
else:
host_rank.append(result)
if all(host_rankings.values()):
good.append((host_rank, host))
if not good:
pass
elif good[0][0]:
# there is a ranking to sort by, use it
good.sort()
else:
# no ranking, randomise
random.shuffle(good)
return (
# list of all hosts which passed rankings (sorted by ranking)
[host for _, host in good],
# data
data
)
class SimpleVisitor(ast.NodeVisitor):
"""Abstract syntax tree node visitor for simple safe operations."""
def visit(self, node):
if not isinstance(node, self.whitelist):
# permit only whitelisted operations
raise ValueError(type(node))
return super().visit(node)
whitelist = (
ast.Expression,
# variables
ast.Name, ast.Load, ast.Attribute, ast.Subscript, ast.Index,
# opers
ast.BinOp, ast.operator,
# types
ast.Num, ast.Str,
# comparisons
ast.Compare, ast.cmpop, ast.List, ast.Tuple
)
def _simple_eval(expr, **variables):
"""Safely evaluates simple python expressions.
Supports a minimal subset of Python operators:
* Binary operations
* Simple comparisons
Supports a minimal subset of Python data types:
* Numbers
* Strings
* Tuples
* Lists
Examples:
>>> _simple_eval('1 + 1')
2
>>> _simple_eval('1 < a', a=2)
True
>>> _simple_eval('1 in (1, 2, 3)')
True
>>> import psutil
>>> _simple_eval('a.available > 0', a=psutil.virtual_memory())
True
If you try to get it to do something it's not supposed to:
>>> _simple_eval('open("foo")')
Traceback (most recent call last):
ValueError: open("foo")
"""
try:
node = ast.parse(expr.strip(), mode='eval')
SimpleVisitor().visit(node)
# acceptable use of eval due to restricted language features
return eval( # nosec
compile(node, '<string>', 'eval'),
{'__builtins__': None},
variables
)
except Exception:
raise ValueError(expr)
def _get_rankings(string):
"""Yield parsed ranking expressions.
Examples:
The first ``token.NAME`` encountered is returned as the query:
>>> _get_rankings('foo() == 123').__next__()
(('foo',), 'RESULT == 123')
If multiple are present they will not get parsed:
>>> _get_rankings('foo() in bar()').__next__()
(('foo',), 'RESULT in bar()')
Positional arguments are added to the query tuple:
>>> _get_rankings('1 in foo("a")').__next__()
(('foo', 'a'), '1 in RESULT')
Comments (not in-line) and multi-line strings are permitted:
>>> _get_rankings('''
... # earl of sandwich
... foo() == 123
... # beef wellington
... ''').__next__()
(('foo',), 'RESULT == 123')
Yields:
tuple - (query, expression)
query (tuple):
The method to call followed by any positional arguments.
expression (str):
The expression with the method call replaced by `RESULT`
"""
for line in string.splitlines():
# parse the string one line at a time
# purposefully don't support multi-line expressions
line = line.strip()
if not line or line.startswith('#'):
# skip blank lines
continue
query = []
start = None
in_args = False
line_feed = BytesIO(line.encode())
for item in tokenize(line_feed.readline):
if item.type == token.ENCODING:
# encoding tag, not of interest
pass
elif not query:
# the first token.NAME has not yet been encountered
if item.type == token.NAME and item.string != 'in':
# this is the first token.NAME, assume it is the method
start = item.start[1]
query.append(item.string)
elif item.string == '(':
# positional arguments follow this
in_args = True
elif item.string == ')':
# end of positional arguments
in_args = False
break
elif in_args:
# literal eval each argument
query.append(ast.literal_eval(item.string))
end = item.end[1]
yield (
tuple(query),
line[:start] + 'RESULT' + line[end:]
)
@lru_cache()
def _tuple_factory(name, params):
"""Wrapper to namedtuple which caches results to prevent duplicates."""
return namedtuple(name, params)
def _deserialise(metrics, data):
"""Convert dict to named tuples.
Examples:
>>> _deserialise(
... [
... ['foo', 'bar'],
... ['baz']
... ],
... [
... {'a': 1, 'b': 2, 'c': 3},
... [1, 2, 3]
... ]
... )
[foo(a=1, b=2, c=3), [1, 2, 3]]
"""
for index, (metric, datum) in enumerate(zip(metrics, data)):
if isinstance(datum, dict):
data[index] = _tuple_factory(
metric[0],
tuple(datum.keys())
)(
*datum.values()
)
return data
def _get_metrics(hosts, metrics, data=None):
"""Retrieve host metrics using SSH if necessary.
Note hosts will not appear in the returned results if:
* They are not contactable.
* There is an error in the command which returns the results.
Args:
hosts (list):
List of host fqdns.
metrics (list):
List in the form [(function, arg1, arg2, ...), ...]
data (dict):
Used for logging success/fail outcomes of the form {host: {}}
Examples:
Command failure:
>>> _get_metrics(['localhost'], [['elephant']])
({}, {'localhost': {'get_metrics': 'Command failed (exit: 1)'}})
Returns:
dict - {host: {(function, arg1, arg2, ...): result}}
"""
host_stats = {}
proc_map = {}
if not data:
data = {host: {} for host in hosts}
# Start up commands on hosts
cmd = ['psutil']
kwargs = {
'stdin_str': json.dumps(metrics),
'capture_process': True
}
for host in hosts:
if is_remote_host(host):
proc_map[host] = _remote_cylc_cmd(cmd, host=host, **kwargs)
else:
proc_map[host] = run_cmd(['cylc'] + cmd, **kwargs)
# Collect results from commands
while proc_map:
for host, proc in list(proc_map.copy().items()):
if proc.poll() is None:
continue
del proc_map[host]
out, err = (f.decode() for f in proc.communicate())
if proc.wait():
# Command failed in verbose/debug mode
LOG.warning(
'Could not evaluate "%s" (return code %d)\n%s',
host, proc.returncode, err
)
data[host]['get_metrics'] = (
f'Command failed (exit: {proc.returncode})')
else:
host_stats[host] = dict(zip(
metrics,
# convert JSON dicts -> namedtuples
_deserialise(metrics, parse_dirty_json(out))
))
sleep(0.01)
return host_stats, data
def _reformat_expr(key, expression):
"""Convert a ranking tuple back into an expression.
Examples:
>>> ranking = 'a().b < c'
>>> _reformat_expr(
... *[x for x in _get_rankings(ranking)][0]
... ) == ranking
True
"""
return expression.replace(
'RESULT',
f'{key[0]}({", ".join(map(repr, key[1:]))})'
)
|
cylc/cylc
|
cylc/flow/host_select.py
|
Python
|
gpl-3.0
| 17,133
|
[
"VisIt"
] |
9e55595a539c4eeb5e215f855433e85da071b1f519215c703a3c9b3136a7052f
|
#!/usr/bin/env python
# Install.py tool to build the GPU library
# used to automate the steps described in the README file in this dir
from __future__ import print_function
import sys,os,subprocess
# help message
help = """
Syntax from src dir: make lib-gpu args="-m machine -h hdir -a arch -p precision -e esuffix -b -o osuffix"
Syntax from lib dir: python Install.py -m machine -h hdir -a arch -p precision -e esuffix -b -o osuffix
specify one or more options, order does not matter
copies an existing Makefile.machine in lib/gpu to Makefile.auto
optionally edits these variables in Makefile.auto:
CUDA_HOME, CUDA_ARCH, CUDA_PRECISION, EXTRAMAKE
optionally uses Makefile.auto to build the GPU library -> libgpu.a
and to copy a Makefile.lammps.esuffix -> Makefile.lammps
optionally copies Makefile.auto to a new Makefile.osuffix
-m = use Makefile.machine as starting point, copy to Makefile.auto
default machine = linux
default for -h, -a, -p, -e settings are those in -m Makefile
-h = set CUDA_HOME variable in Makefile.auto to hdir
hdir = path to NVIDIA Cuda software, e.g. /usr/local/cuda
-a = set CUDA_ARCH variable in Makefile.auto to arch
use arch = sm_20 for Fermi (C2050/C2070, deprecated as of CUDA 8.0)
or GeForce GTX 580 or similar
use arch = sm_30 for Kepler (K10)
use arch = sm_35 for Kepler (K40) or GeForce GTX Titan or similar
use arch = sm_37 for Kepler (dual K80)
use arch = sm_60 for Pascal (P100)
use arch = sm_70 for Volta
-p = set CUDA_PRECISION variable in Makefile.auto to precision
use precision = double or mixed or single
-e = set EXTRAMAKE variable in Makefile.auto to Makefile.lammps.esuffix
-b = make the GPU library using Makefile.auto
first performs a "make clean"
then produces libgpu.a if successful
also copies EXTRAMAKE file -> Makefile.lammps
-e can set which Makefile.lammps.esuffix file is copied
-o = copy final Makefile.auto to Makefile.osuffix
Examples:
make lib-gpu args="-b" # build GPU lib with default Makefile.linux
make lib-gpu args="-m xk7 -p single -o xk7.single" # create new Makefile.xk7.single, altered for single-precision
make lib-gpu args="-m mpi -a sm_35 -p single -o mpi.mixed -b" # create new Makefile.mpi.mixed, also build GPU lib with these settings
"""
# print error message or help
def error(str=None):
if not str: print(help)
else: print("ERROR",str)
sys.exit()
# parse args
args = sys.argv[1:]
nargs = len(args)
if nargs == 0: error()
isuffix = "linux"
hflag = aflag = pflag = eflag = 0
makeflag = 0
outflag = 0
iarg = 0
while iarg < nargs:
if args[iarg] == "-m":
if iarg+2 > nargs: error()
isuffix = args[iarg+1]
iarg += 2
elif args[iarg] == "-h":
if iarg+2 > nargs: error()
hflag = 1
hdir = args[iarg+1]
iarg += 2
elif args[iarg] == "-a":
if iarg+2 > nargs: error()
aflag = 1
arch = args[iarg+1]
iarg += 2
elif args[iarg] == "-p":
if iarg+2 > nargs: error()
pflag = 1
precision = args[iarg+1]
iarg += 2
elif args[iarg] == "-e":
if iarg+2 > nargs: error()
eflag = 1
lmpsuffix = args[iarg+1]
iarg += 2
elif args[iarg] == "-b":
makeflag = 1
iarg += 1
elif args[iarg] == "-o":
if iarg+2 > nargs: error()
outflag = 1
osuffix = args[iarg+1]
iarg += 2
else: error()
if pflag:
if precision == "double": precstr = "-D_DOUBLE_DOUBLE"
elif precision == "mixed": precstr = "-D_SINGLE_DOUBLE"
elif precision == "single": precstr = "-D_SINGLE_SINGLE"
else: error("Invalid precision setting")
# create Makefile.auto
# reset EXTRAMAKE, CUDA_HOME, CUDA_ARCH, CUDA_PRECISION if requested
if not os.path.exists("Makefile.%s" % isuffix):
error("lib/gpu/Makefile.%s does not exist" % isuffix)
lines = open("Makefile.%s" % isuffix,'r').readlines()
fp = open("Makefile.auto",'w')
for line in lines:
words = line.split()
if len(words) != 3:
fp.write(line)
continue
if hflag and words[0] == "CUDA_HOME" and words[1] == '=':
line = line.replace(words[2],hdir)
if aflag and words[0] == "CUDA_ARCH" and words[1] == '=':
line = line.replace(words[2],"-arch=%s" % arch)
if pflag and words[0] == "CUDA_PRECISION" and words[1] == '=':
line = line.replace(words[2],precstr)
if eflag and words[0] == "EXTRAMAKE" and words[1] == '=':
line = line.replace(words[2],"Makefile.lammps.%s" % lmpsuffix)
fp.write(line)
fp.close()
# perform make
# make operations copies EXTRAMAKE file to Makefile.lammps
if makeflag:
print("Building libgpu.a ...")
cmd = "rm -f libgpu.a"
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = "make -f Makefile.auto clean; make -f Makefile.auto"
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))
if not os.path.exists("libgpu.a"):
error("Build of lib/gpu/libgpu.a was NOT successful")
if not os.path.exists("Makefile.lammps"):
error("lib/gpu/Makefile.lammps was NOT created")
# copy new Makefile.auto to Makefile.osuffix
if outflag:
print("Creating new Makefile.%s" % osuffix)
cmd = "cp Makefile.auto Makefile.%s" % osuffix
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
|
quang-ha/lammps
|
lib/gpu/Install.py
|
Python
|
gpl-2.0
| 5,290
|
[
"LAMMPS"
] |
e43e70fc9bc083e3fdabd562bd1cc8a2e2f06d090b739a54ffddce073bfe20f8
|
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension doesn't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
if __name__ == "__main__":
run_module_suite()
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py
|
Python
|
mit
| 4,604
|
[
"Gaussian"
] |
641805bebaf386e9f55cd989e2bf3ce365293fbccfe0d7adaba85759181a45f6
|
#!/usr/bin/env python3
"""Merge Bowtie statistics."""
import os
import sys
if len(sys.argv) < 2:
sys.stderr.write("No stats file given.\n")
exit(1)
for i in range(2, len(sys.argv), 2):
if not os.path.isfile(sys.argv[i]):
sys.stderr.write("Stats file {} not found.\n".format(sys.argv[i]))
exit(1)
stats = []
tprocessed, tonevalid, tfailed, tsuppressed = 0, 0, 0, 0
for i in range(1, len(sys.argv), 2):
trimmed = sys.argv[i]
with open(sys.argv[i + 1]) as f:
processed, onevalid, failed, suppressed = -1, -1, -1, -1
for line in f:
vals = line.strip().split(" ")
if "reads processed" in line:
processed = int(vals[-1])
if "reads with at least one reported alignment" in line:
onevalid = int(vals[-2])
if "reads that failed to align" in line:
failed = int(vals[-2])
if "reads with alignments suppressed due to -m" in line:
suppressed = int(vals[-2])
if onevalid > 0:
tonevalid += onevalid
mapped = round((onevalid / processed) * 100, 1)
stats.append((trimmed, processed, onevalid, failed, suppressed, mapped))
with open("stats.tab", "w") as f:
f.write(
"Trim3 size\tReads processed\t"
"Reads with at least one reported alignment\t"
"Reads that failed to align\t"
"Reads with alignments suppressed due to -m\tMapped (%)\n"
)
for vals in stats:
f.write("\t".join(map(str, vals)) + "\n")
tmapped = round((tonevalid / stats[0][1]) * 100, 1)
f.write(
"\t".join(
map(
str,
("Total", stats[0][1], tonevalid, stats[-1][3], stats[-1][4], tmapped),
)
)
+ "\n"
)
|
genialis/resolwe-bio
|
resolwe_bio/tools/mergebowtiestats.py
|
Python
|
apache-2.0
| 1,819
|
[
"Bowtie"
] |
f18e3f9efaab1f9fc80b20a82931290bcc70fce73bf838c5ca3b87b14ff31cb3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2010 - 2011, University of New Orleans
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# --
#Contains the process for running a viamics analysis using BLAST, using functions and classes from
#framework.tools.blast. This depends on blast databases being stored at constants.blastdb_dir, and having blastn
#blastn and makeblastdb executables on the path
#
#If the blastn or makeblastdb programs are throwing errors, one possible cause is spaces in the path to input
#or output files. I could not for the life of me figure this out (I think the blastn and makeblastdb programs just
#can't handle it), so I just stick underscores in the name the user gives. If Viamics is installed at say
#/home/username/Desktop/My bioinformatics folder/viamics, there could be a problem.
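#
# For orientation only (added comment; these exact commands are not issued by
# this module -- the real calls live in framework.tools.blast): a database
# under constants.blastdb_dir is typically built and queried with the BLAST+
# command-line tools roughly as follows, which is why blastn and makeblastdb
# must be on the PATH (paths and names below are placeholders):
#   makeblastdb -in reference_seqs.fasta -dbtype nucl -out <blastdb_dir>/mydb/mydb
#   blastn -query query_seqs.fasta -db <blastdb_dir>/mydb/mydb -out blast_output.txt -max_target_seqs 1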
import os
import cPickle
from framework.tools.helper_functions import SerializeToFile, DeserializeFromFile
from framework.tools.logger import debug
from framework.tools import fasta
import framework.constants as c
import framework.tools.blast
import framework.tools.helper_functions as helper_functions
def _preprocess(p, request_dict):
#fasta.stripped specifies an open keyfile object, but all it does is
#"for line in keys" so a list of strings works here. Using a list avoids all
#the nonsense of sending another file from the client.
mode = request_dict.get("qa_mode")
try:
return fasta.fasta_qa_preprocess(
mode,
request_dict.get("data_file_path"),
request_dict.get("codes_primers"),#keyfile. see above
homopolymer_length = request_dict.get("homopolymer_length"))
except:
debug(helper_functions.formatExceptionInfo(), p.files.log_file)
raise
def _exec(p, request_dict):
p.set_analysis_type('blast')
p.threshold = request_dict.get('threshold_dict')
separator = request_dict['seperator']#sic
debug("storing separator: '%s'" % separator, p.files.log_file)
open(p.files.seperator_file_path, 'w').write(separator)
debug("storing DB name: '%s'" % request_dict['db_name'], p.files.log_file)
open(p.files.blast_db_name_path, 'w').write(request_dict['db_name'])
if p.threshold:
debug("storing confidence threshold", p.files.log_file)
with open(p.files.threshold_path,'w') as f:
f.write(cPickle.dumps(p.threshold))
#add length info to legend
num_seqs = helper_functions.get_number_of_lines(p.files.data_file_path) / 2
name = request_dict['db_name']
#run blast on data
blast_db = os.path.join(c.blastdb_dir,name,name)
debug("Extracting QA info", p.files.log_file)
cmt = open(p.files.data_comment_file_path,'w')
for line in open(p.files.data_file_path):
if line.startswith(';'):
cmt.write(line)
cmt.close()
debug(("running blast on %d sequences against database: %s " % (num_seqs, request_dict['db_name'])), p.files.log_file)
framework.tools.blast.run_blastn(p.files.data_file_path, p.files.blast_output_file_path, blast_db,num=1)
samples_dictionary(p)
samples = DeserializeFromFile(p.files.samples_serialized_file_path).keys()
if len(samples) == 0:
msg = 'error: samples dict contains no samples. perhaps no sequences in the query matched the database'
debug(msg,p.files.log_file)
raise ValueError(msg)
else:
open(p.files.all_unique_samples_file_path, 'w').write('\n'.join(samples) + '\n')
debug("%d unique sample names stored" % len(samples), p.files.log_file)
otu_library(p)
if hasattr(p,'threshold'):
separate_low_confidence(p)
def samples_dictionary(p):
debug("Computing sample dictionary", p.files.log_file)
db_name = open(p.files.blast_db_name_path).read()
legend_path = os.path.join(c.blastdb_dir,
db_name,db_name+c.blast_legend_file_extension)
samples_dict = framework.tools.blast.create_samples_dictionary(p.files.blast_output_file_path,
legend_path,
open(p.files.seperator_file_path).read(),
thresholds=p.threshold)
debug("Serializing samples dictionary object", p.files.log_file)
SerializeToFile(samples_dict, p.files.samples_serialized_file_path)
def otu_library(p):
debug("Generating OTU Library", p.files.log_file)
db_name = open(p.files.blast_db_name_path).read()
legend_path = os.path.join(c.blastdb_dir,
db_name,db_name+c.blast_legend_file_extension)
otu_library = framework.tools.blast.get_otu_library(p.files.blast_output_file_path,
legend_path,
open(p.files.seperator_file_path).read())
SerializeToFile(otu_library, p.files.otu_library_file_path)
def separate_low_confidence(p):
debug("Separating low confidence sequences", p.files.log_file)
separator = open(p.files.seperator_file_path).read()
lo_seqs = framework.tools.blast.low_confidence_seqs(open(p.files.data_file_path),
open(p.files.blast_output_file_path),
p.threshold,
separator)
with open(p.files.low_confidence_seqs_path,'w') as o:
for s in lo_seqs:
o.write(s)
def _module_functions(p, request_dict):
return {
'blast': {'func': samples_dictionary, 'desc': 'Samples dictionary'},
'blast': {'func': otu_library, 'desc': 'OTU library'}
}
def _sample_map_functions(p, request_dict):
return {}
|
gblanchard4/viamics
|
framework/modules/blast.py
|
Python
|
gpl-2.0
| 6,151
|
[
"BLAST"
] |
2523682838cf05361c21b53cd9f33ccbdbf646194139957b2868037f10f402f0
|
from abc import ABCMeta
from util.reflection import deriving
from util.functions import unique_id
import special
import attributes
# pylint: disable=W0231
class Node(deriving('eq', 'show')):
__metaclass__ = ABCMeta
def __init__(self):
self._attrs = attributes.Attributes()
self._unique_name = None
@property
def unique_name(self):
if self._unique_name is None:
self._unique_name = unique_id(self.__class__.__name__)
return self._unique_name
def walk_down(self, visitor, short_circuit=False):
visitor.visit(self)
if not short_circuit or visitor.recurse_on(self):
self.recurse(visitor, Node.walk_down)
def walk_up(self, visitor, short_circuit=False):
if not short_circuit or visitor.recurse_on(self):
self.recurse(visitor, Node.walk_up)
visitor.visit(self)
def recurse(self, visitor, walk):
pass
def set_soft(self, key, value):
self._attrs.set_soft(key, value)
def set_hard(self, key, value):
self._attrs.set_hard(key, value)
def __contains__(self, key):
return self._attrs.__contains__(key)
def __getitem__(self, key):
return self._attrs.__getitem__(key)
def __setitem__(self, key, value):
self._attrs.__setitem__(key, value)
class Module(Node):
def __init__(self, name=None, exprs=None):
Node.__init__(self)
self.name = name
self.exprs = exprs
def recurse(self, visitor, walk):
for expr in self.exprs:
walk(expr, visitor)
class NoOp(Node):
def __init__(self):
Node.__init__(self)
class _Collection(Node):
def __init__(self, values=None):
Node.__init__(self)
if values is None:
values = []
self.values = values
def recurse(self, visitor, walk):
for value in self.values:
walk(value, visitor)
class Tuple(_Collection):
pass
class List(_Collection):
pass
class _Value(Node):
def __init__(self, value):
Node.__init__(self)
self.value = value
class Int(_Value):
pass
class Real(_Value):
pass
class Sci(_Value):
pass
class Bool(_Value):
def __init__(self, value):
assert value in ('0', '1')
_Value.__init__(self, value)
class ValueId(_Value):
pass
class SymbolId(_Value):
pass
class TypeId(_Value):
pass
class Unit(Node):
def __init__(self):
Node.__init__(self)
class Block(Node):
def __init__(self, exprs):
Node.__init__(self)
self.exprs = exprs
def recurse(self, visitor, walk):
for expr in self.exprs:
walk(expr, visitor)
class BinOp(Node):
def __init__(self, func, args):
Node.__init__(self)
self.func = func
self.args = args
def recurse(self, visitor, walk):
for arg in self.args:
walk(arg, visitor)
class If(Node):
def __init__(self, pred, if_body, else_body=None):
Node.__init__(self)
self.pred = pred
self.if_body = if_body
if else_body is not None:
self.else_body = else_body
else:
self.else_body = Unit()
def recurse(self, visitor, walk):
walk(self.pred, visitor)
walk(self.if_body, visitor)
walk(self.else_body, visitor)
class Else(Node):
def __init__(self, expr, body):
Node.__init__(self)
self.expr = expr
self.body = body
def recurse(self, visitor, walk):
walk(self.expr, visitor)
walk(self.body, visitor)
class Assign(Node):
def __init__(self, name, value):
Node.__init__(self)
self.name = name
self.value = value
def recurse(self, visitor, walk):
walk(self.value, visitor)
class AssignRhs(Node):
def __init__(self, value):
Node.__init__(self)
self.value = value
def recurse(self, visitor, walk):
walk(self.value, visitor)
class While(Node):
def __init__(self, pred, body):
Node.__init__(self)
self.pred = pred
self.body = body
def recurse(self, visitor, walk):
walk(self.pred, visitor)
walk(self.body, visitor)
class _Declaration(Node):
def __init__(self, name, value, type_=None):
Node.__init__(self)
self.name = name
self.value = value
if type_ is None:
self.type_ = InferType()
else:
self.type_ = type_
def recurse(self, visitor, walk):
walk(self.value, visitor)
class Val(_Declaration):
pass
class Var(_Declaration):
pass
class Mut(_Declaration):
pass
class Ref(_Declaration):
pass
class For(Node):
def __init__(self, clauses, body):
Node.__init__(self)
self.clauses = clauses
self.body = body
def recurse(self, visitor, walk):
walk(self.body, visitor)
class ForClause(Node):
def __init__(self, bind, in_):
Node.__init__(self)
self.bind = bind
self.in_ = in_
class KV(Node):
def __init__(self, key, value):
Node.__init__(self)
self.key = key
self.value = value
def recurse(self, visitor, walk):
walk(self.value, visitor)
class _Comment(Node):
def __init__(self, content):
Node.__init__(self)
self.content = content
class TempComment(_Comment):
pass
class DocComment(_Comment):
pass
class BlockComment(_Comment):
pass
class Binding(Node):
def __init__(self, left, right):
Node.__init__(self)
self.left = left
self.right = right
class Call(Node):
def __init__(self, func, arg, block=None):
Node.__init__(self)
self.func = func
self.arg = arg
if block is not None:
self.block = block
else:
self.block = Unit()
def recurse(self, visitor, walk):
walk(self.arg, visitor)
walk(self.block, visitor)
class Param(Node):
def __init__(self, name, type_=None):
Node.__init__(self)
self.name = name
if type_ is None:
self.type_ = InferType()
else:
self.type_ = type_
class Def(Node):
def __init__(self, name, param, body, return_type=None):
Node.__init__(self)
self.name = name
self.param = param
self.body = body
if return_type is not None:
self.return_type = return_type
else:
self.return_type = InferType()
def recurse(self, visitor, walk):
walk(self.param, visitor)
walk(self.body, visitor)
class _Specification(Node):
def __init__(self, name, body, param=None):
Node.__init__(self)
self.name = name
self.body = body
if param is not None:
self.param = param
else:
self.param = Tuple([])
class Proto(_Specification):
pass
class Object(_Specification):
pass
class Trait(_Specification):
pass
##############################################################################
# Types
##############################################################################
class InferType(Node):
pass
class IntType(Node):
pass
class BoolType(Node):
pass
class RealType(Node):
pass
class UnitType(Node):
pass
class AnyType(Node):
pass
class FunctionType(Node):
def __init__(self, param_type, return_type):
self.param_type = param_type
self.return_type = return_type
def recurse(self, visitor, walk):
walk(self.param_type, visitor)
walk(self.return_type, visitor)
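# -----------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module): a
# minimal visitor usable with Node.walk_down/walk_up above.  It assumes only
# the two methods the walk protocol calls -- visit() and recurse_on() -- and
# simply counts nodes by class name.
class _CountingVisitor(object):
    def __init__(self):
        self.counts = {}

    def visit(self, node):
        name = node.__class__.__name__
        self.counts[name] = self.counts.get(name, 0) + 1

    def recurse_on(self, node):
        # always descend; a real visitor could prune subtrees here
        return True


if __name__ == '__main__':
    tree = Module(name='demo', exprs=[Assign(ValueId('x'), Int('1'))])
    visitor = _CountingVisitor()
    tree.walk_down(visitor)
    print visitor.counts  # e.g. {'Module': 1, 'Assign': 1, 'Int': 1}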
|
dacjames/mara-lang
|
bootstrap/mara/node.py
|
Python
|
mit
| 7,858
|
[
"VisIt"
] |
da33f18a00a45ad97b3aab2a93cb11330511ab131dd3ca29403b0520235656c1
|
#!/usr/bin/env python
# coding: utf-8
# # Resampling Methods
#
# ## Introduction
#
# Resampling methods are an indispensable tool in modern
# statistics. They involve repeatedly drawing samples from a training
# set and refitting a model of interest on each sample in order to
# obtain additional information about the fitted model. For example, in
# order to estimate the variability of a linear regression fit, we can
# repeatedly draw different samples from the training data, fit a linear
# regression to each new sample, and then examine the extent to which
# the resulting fits differ. Such an approach may allow us to obtain
# information that would not be available from fitting the model only
# once using the original training sample.
#
# Two resampling methods are often used in Machine Learning analyses,
# 1. The **bootstrap method**
#
# 2. and **Cross-Validation**
#
# In addition there are several other methods such as the Jackknife and the Blocking methods. We will discuss in particular
# cross-validation and the bootstrap method.
#
#
# Resampling approaches can be computationally expensive, because they
# involve fitting the same statistical method multiple times using
# different subsets of the training data. However, due to recent
# advances in computing power, the computational requirements of
# resampling methods generally are not prohibitive. In this chapter, we
# discuss two of the most commonly used resampling methods,
# cross-validation and the bootstrap. Both methods are important tools
# in the practical application of many statistical learning
# procedures. For example, cross-validation can be used to estimate the
# test error associated with a given statistical learning method in
# order to evaluate its performance, or to select the appropriate level
# of flexibility. The process of evaluating a model’s performance is
# known as model assessment, whereas the process of selecting the proper
# level of flexibility for a model is known as model selection. The
# bootstrap is widely used.
#
#
# * Our simulations can be treated as *computer experiments*. This is particularly the case for Monte Carlo methods
#
# * The results can be analysed with the same statistical tools as we would use analysing experimental data.
#
# * As in all experiments, we are looking for expectation values and an estimate of how accurate they are, i.e., possible sources for errors.
#
# ## Reminder on Statistics
#
#
# * As in other experiments, many numerical experiments have two classes of errors:
#
# * Statistical errors
#
# * Systematic errors
#
#
# * Statistical errors can be estimated using standard tools from statistics
#
# * Systematic errors are method specific and must be treated differently from case to case.
#
# The
# advantage of doing linear regression is that we actually end up with
# analytical expressions for several statistical quantities.
# Standard least squares and Ridge regression allow us to
# derive quantities like the variance and other expectation values in a
# rather straightforward way.
#
#
# It is assumed that $\varepsilon_i
# \sim \mathcal{N}(0, \sigma^2)$ and the $\varepsilon_{i}$ are
# independent, i.e.:
# $$
# \begin{align*}
# \mbox{Cov}(\varepsilon_{i_1},
# \varepsilon_{i_2}) & = \left\{ \begin{array}{lcc} \sigma^2 & \mbox{if}
# & i_1 = i_2, \\ 0 & \mbox{if} & i_1 \not= i_2. \end{array} \right.
# \end{align*}
# $$
# The randomness of $\varepsilon_i$ implies that
# $\mathbf{y}_i$ is also a random variable. In particular,
# $\mathbf{y}_i$ is normally distributed, because $\varepsilon_i \sim
# \mathcal{N}(0, \sigma^2)$ and $\mathbf{X}_{i,\ast} \, \boldsymbol{\beta}$ is a
# non-random scalar. To specify the parameters of the distribution of
# $\mathbf{y}_i$ we need to calculate its first two moments.
#
# Recall that $\boldsymbol{X}$ is a matrix of dimensionality $n\times p$. The
# notation above $\mathbf{X}_{i,\ast}$ means that we are looking at
# row number $i$, and that the product $\mathbf{X}_{i,\ast}\,\boldsymbol{\beta}$ sums over all $p$ columns.
#
#
# The assumption we have made here can be summarized as (and this is going to be useful when we discuss the bias-variance trade off)
# that there exists a function $f(\boldsymbol{x})$ and a normal distributed error $\boldsymbol{\varepsilon}\sim \mathcal{N}(0, \sigma^2)$
# which describe our data
# $$
# \boldsymbol{y} = f(\boldsymbol{x})+\boldsymbol{\varepsilon}
# $$
# We approximate this function with our model from the solution of the linear regression equations, that is our
# function $f$ is approximated by $\boldsymbol{\tilde{y}}$ where we want to minimize $(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2$, our MSE, with
# $$
# \boldsymbol{\tilde{y}} = \boldsymbol{X}\boldsymbol{\beta}.
# $$
# We can calculate the expectation value of $\boldsymbol{y}$ for a given element $i$
# $$
# \begin{align*}
# \mathbb{E}(y_i) & =
# \mathbb{E}(\mathbf{X}_{i, \ast} \, \boldsymbol{\beta}) + \mathbb{E}(\varepsilon_i)
# \, \, \, = \, \, \, \mathbf{X}_{i, \ast} \, \beta,
# \end{align*}
# $$
# while
# its variance is
# $$
# \begin{align*} \mbox{Var}(y_i) & = \mathbb{E} \{ [y_i
# - \mathbb{E}(y_i)]^2 \} \, \, \, = \, \, \, \mathbb{E} ( y_i^2 ) -
# [\mathbb{E}(y_i)]^2 \\ & = \mathbb{E} [ ( \mathbf{X}_{i, \ast} \,
# \beta + \varepsilon_i )^2] - ( \mathbf{X}_{i, \ast} \, \boldsymbol{\beta})^2 \\ &
# = \mathbb{E} [ ( \mathbf{X}_{i, \ast} \, \boldsymbol{\beta})^2 + 2 \varepsilon_i
# \mathbf{X}_{i, \ast} \, \boldsymbol{\beta} + \varepsilon_i^2 ] - ( \mathbf{X}_{i,
# \ast} \, \beta)^2 \\ & = ( \mathbf{X}_{i, \ast} \, \boldsymbol{\beta})^2 + 2
# \mathbb{E}(\varepsilon_i) \mathbf{X}_{i, \ast} \, \boldsymbol{\beta} +
# \mathbb{E}(\varepsilon_i^2 ) - ( \mathbf{X}_{i, \ast} \, \boldsymbol{\beta})^2
# \\ & = \mathbb{E}(\varepsilon_i^2 ) \, \, \, = \, \, \,
# \mbox{Var}(\varepsilon_i) \, \, \, = \, \, \, \sigma^2.
# \end{align*}
# $$
# Hence, $y_i \sim \mathcal{N}( \mathbf{X}_{i, \ast} \, \boldsymbol{\beta}, \sigma^2)$, that is $\boldsymbol{y}$ follows a normal distribution with
# mean value $\boldsymbol{X}\boldsymbol{\beta}$ and variance $\sigma^2$ (not to be confused with the singular values of the SVD).
#
#
# With the OLS expressions for the parameters $\boldsymbol{\beta}$ we can evaluate the expectation value
# $$
# \mathbb{E}(\boldsymbol{\beta}) = \mathbb{E}[ (\mathbf{X}^{\top} \mathbf{X})^{-1}\mathbf{X}^{T} \mathbf{Y}]=(\mathbf{X}^{T} \mathbf{X})^{-1}\mathbf{X}^{T} \mathbb{E}[ \mathbf{Y}]=(\mathbf{X}^{T} \mathbf{X})^{-1} \mathbf{X}^{T}\mathbf{X}\boldsymbol{\beta}=\boldsymbol{\beta}.
# $$
# This means that the estimator of the regression parameters is unbiased.
#
# We can also calculate the variance
#
# The variance of $\boldsymbol{\beta}$ is
# $$
# \begin{eqnarray*}
# \mbox{Var}(\boldsymbol{\beta}) & = & \mathbb{E} \{ [\boldsymbol{\beta} - \mathbb{E}(\boldsymbol{\beta})] [\boldsymbol{\beta} - \mathbb{E}(\boldsymbol{\beta})]^{T} \}
# \\
# & = & \mathbb{E} \{ [(\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \mathbf{Y} - \boldsymbol{\beta}] \, [(\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \mathbf{Y} - \boldsymbol{\beta}]^{T} \}
# \\
# % & = & \mathbb{E} \{ [(\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \mathbf{Y}] \, [(\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \mathbf{Y}]^{T} \} - \boldsymbol{\beta} \, \boldsymbol{\beta}^{T}
# % \\
# % & = & \mathbb{E} \{ (\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \mathbf{Y} \, \mathbf{Y}^{T} \, \mathbf{X} \, (\mathbf{X}^{T} \mathbf{X})^{-1} \} - \boldsymbol{\beta} \, \boldsymbol{\beta}^{T}
# % \\
# & = & (\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \, \mathbb{E} \{ \mathbf{Y} \, \mathbf{Y}^{T} \} \, \mathbf{X} \, (\mathbf{X}^{T} \mathbf{X})^{-1} - \boldsymbol{\beta} \, \boldsymbol{\beta}^{T}
# \\
# & = & (\mathbf{X}^{T} \mathbf{X})^{-1} \, \mathbf{X}^{T} \, \{ \mathbf{X} \, \boldsymbol{\beta} \, \boldsymbol{\beta}^{T} \, \mathbf{X}^{T} + \sigma^2 \} \, \mathbf{X} \, (\mathbf{X}^{T} \mathbf{X})^{-1} - \boldsymbol{\beta} \, \boldsymbol{\beta}^{T}
# % \\
# % & = & (\mathbf{X}^T \mathbf{X})^{-1} \, \mathbf{X}^T \, \mathbf{X} \, \boldsymbol{\beta} \, \boldsymbol{\beta}^T \, \mathbf{X}^T \, \mathbf{X} \, (\mathbf{X}^T % \mathbf{X})^{-1}
# % \\
# % & & + \, \, \sigma^2 \, (\mathbf{X}^T \mathbf{X})^{-1} \, \mathbf{X}^T \, \mathbf{X} \, (\mathbf{X}^T \mathbf{X})^{-1} - \boldsymbol{\beta} \boldsymbol{\beta}^T
# \\
# & = & \boldsymbol{\beta} \, \boldsymbol{\beta}^{T} + \sigma^2 \, (\mathbf{X}^{T} \mathbf{X})^{-1} - \boldsymbol{\beta} \, \boldsymbol{\beta}^{T}
# \, \, \, = \, \, \, \sigma^2 \, (\mathbf{X}^{T} \mathbf{X})^{-1},
# \end{eqnarray*}
# $$
# where we have used that $\mathbb{E} (\mathbf{Y} \mathbf{Y}^{T}) =
# \mathbf{X} \, \boldsymbol{\beta} \, \boldsymbol{\beta}^{T} \, \mathbf{X}^{T} +
# \sigma^2 \, \mathbf{I}_{nn}$. From $\mbox{Var}(\boldsymbol{\beta}) = \sigma^2
# \, (\mathbf{X}^{T} \mathbf{X})^{-1}$, one obtains an estimate of the
# variance of the estimate of the $j$-th regression coefficient:
# $\sigma^2 (\beta_j ) = \sigma^2 [(\mathbf{X}^{T} \mathbf{X})^{-1}]_{jj}$, with corresponding standard
# error $\sigma (\beta_j ) = \sigma \sqrt{[(\mathbf{X}^{T} \mathbf{X})^{-1}]_{jj}}$. This may be used to
# construct a confidence interval for the estimates.
#
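# As a small numerical illustration (our own addition, with a made-up design
# matrix), the sketch below repeatedly draws new noise vectors, refits OLS and
# compares the empirical covariance of the estimates with
# $\sigma^2(\mathbf{X}^{T}\mathbf{X})^{-1}$; the last lines use the diagonal
# elements to indicate approximate 95% confidence intervals.
import numpy as np
rng = np.random.default_rng(123)
n_obs, sigma_true = 200, 0.5
beta_true = np.array([1.0, 2.0, -1.0])
x_col = np.linspace(0, 1, n_obs)
# design matrix with an intercept column, x and x^2
X_demo = np.column_stack([x_col**p for p in range(len(beta_true))])
XtX_inv = np.linalg.inv(X_demo.T @ X_demo)
# repeat the "experiment": new noise, same design matrix, refit OLS each time
betas_demo = np.empty((1000, len(beta_true)))
for k in range(1000):
    y_demo = X_demo @ beta_true + sigma_true*rng.standard_normal(n_obs)
    betas_demo[k] = XtX_inv @ X_demo.T @ y_demo
print("Mean of OLS estimates (close to true beta):", betas_demo.mean(axis=0))
print("Empirical covariance:\n", np.cov(betas_demo, rowvar=False))
print("Analytical sigma^2 (X^T X)^{-1}:\n", sigma_true**2*XtX_inv)
# approximate 95% intervals beta_j +- 1.96*sigma*sqrt([(X^T X)^{-1}]_{jj}),
# here centered on the true values purely for illustration
std_err_demo = sigma_true*np.sqrt(np.diag(XtX_inv))
for j in range(len(beta_true)):
    print(f"beta_{j}: [{beta_true[j]-1.96*std_err_demo[j]:.3f}, {beta_true[j]+1.96*std_err_demo[j]:.3f}]")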
#
# In a similar way, we can obtain analytical expressions for say the
# expectation values of the parameters $\boldsymbol{\beta}$ and their variance
# when we employ Ridge regression, allowing us again to define a confidence interval.
#
# It is rather straightforward to show that
# $$
# \mathbb{E} \big[ \boldsymbol{\beta}^{\mathrm{Ridge}} \big]=(\mathbf{X}^{T} \mathbf{X} + \lambda \mathbf{I}_{pp})^{-1} (\mathbf{X}^{\top} \mathbf{X})\boldsymbol{\beta}^{\mathrm{OLS}}.
# $$
# We see clearly that
# $\mathbb{E} \big[ \boldsymbol{\beta}^{\mathrm{Ridge}} \big] \not= \boldsymbol{\beta}^{\mathrm{OLS}}$ for any $\lambda > 0$. We say then that the ridge estimator is biased.
#
# We can also compute the variance as
# $$
# \mbox{Var}[\boldsymbol{\beta}^{\mathrm{Ridge}}]=\sigma^2[ \mathbf{X}^{T} \mathbf{X} + \lambda \mathbf{I} ]^{-1} \mathbf{X}^{T} \mathbf{X} \{ [ \mathbf{X}^{\top} \mathbf{X} + \lambda \mathbf{I} ]^{-1}\}^{T},
# $$
# and it is easy to see that if the parameter $\lambda$ goes to infinity then the variance of Ridge parameters $\boldsymbol{\beta}$ goes to zero.
#
# With this, we can compute the difference
# $$
# \mbox{Var}[\boldsymbol{\beta}^{\mathrm{OLS}}]-\mbox{Var}(\boldsymbol{\beta}^{\mathrm{Ridge}})=\sigma^2 [ \mathbf{X}^{T} \mathbf{X} + \lambda \mathbf{I} ]^{-1}[ 2\lambda\mathbf{I} + \lambda^2 (\mathbf{X}^{T} \mathbf{X})^{-1} ] \{ [ \mathbf{X}^{T} \mathbf{X} + \lambda \mathbf{I} ]^{-1}\}^{T}.
# $$
# The difference is non-negative definite since each component of the
# matrix product is non-negative definite.
# This means the variance we obtain with the standard OLS will always for $\lambda > 0$ be larger than the variance of $\boldsymbol{\beta}$ obtained with the Ridge estimator. This has interesting consequences when we discuss the so-called bias-variance trade-off below.
#
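# The following small check (our own addition, with a made-up design matrix)
# illustrates the statement above: the diagonal elements of the Ridge variance
# $\sigma^2[\mathbf{X}^{T}\mathbf{X}+\lambda\mathbf{I}]^{-1}\mathbf{X}^{T}\mathbf{X}\{[\mathbf{X}^{T}\mathbf{X}+\lambda\mathbf{I}]^{-1}\}^{T}$
# never exceed those of the OLS variance $\sigma^2(\mathbf{X}^{T}\mathbf{X})^{-1}$,
# and they shrink towards zero as $\lambda$ grows.
import numpy as np
rng = np.random.default_rng(42)
X_demo = rng.standard_normal((50, 3))
sigma2_demo = 1.0
XtX = X_demo.T @ X_demo
var_ols = sigma2_demo*np.linalg.inv(XtX)
print("diag Var(OLS)  :", np.diag(var_ols).round(4))
for lam in (0.0, 0.1, 1.0, 10.0, 100.0):
    A = np.linalg.inv(XtX + lam*np.eye(3))
    var_ridge = sigma2_demo*A @ XtX @ A.T
    print(f"lambda = {lam:7.1f}, diag Var(Ridge):", np.diag(var_ridge).round(4))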
#
#
# ## Resampling methods
#
# With all these analytical equations for both the OLS and Ridge
# regression, we will now outline how to assess a given model. This will
# lead us to a discussion of the so-called bias-variance tradeoff (see
# below) and so-called resampling methods.
#
# One of the quantities we have discussed as a way to measure errors is
# the mean-squared error (MSE), mainly used for fitting of continuous
# functions. Another choice is the absolute error.
#
# In the discussions below we will focus on the MSE and in particular since we will split the data into test and training data,
# we discuss the
# 1. prediction error or simply the **test error** $\mathrm{Err_{Test}}$, where we have a fixed training set and the test error is the MSE arising from the data reserved for testing. We discuss also the
#
# 2. training error $\mathrm{Err_{Train}}$, which is the average loss over the training data.
#
# As our model becomes more and more complex, more of the training data tends to be used. The model may then adapt to more complicated structures in the data. This may lead to a decrease in the bias (see below for a code example) and a slight increase of the variance for the test error.
# For a certain level of complexity the test error will reach a minimum, before starting to increase again. The
# training error reaches a saturation.
#
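# A minimal sketch (our own example with simple one-dimensional data) of how
# the training and test errors defined above can be computed for a fixed
# train/test split; the **Scikit-Learn** functions used here reappear in the
# larger examples later in this chapter.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error
rng = np.random.default_rng(1)
x_demo = np.linspace(-1, 1, 100).reshape(-1, 1)
y_demo = np.sin(np.pi*x_demo) + 0.2*rng.standard_normal(x_demo.shape)
x_tr, x_te, y_tr, y_te = train_test_split(x_demo, y_demo, test_size=0.2, random_state=1)
for deg in (1, 5, 15):
    model_demo = make_pipeline(PolynomialFeatures(degree=deg), LinearRegression())
    model_demo.fit(x_tr, y_tr)
    err_train = mean_squared_error(y_tr, model_demo.predict(x_tr))
    err_test = mean_squared_error(y_te, model_demo.predict(x_te))
    print(f"degree {deg:2d}: Err_Train = {err_train:.4f}, Err_Test = {err_test:.4f}")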
#
#
# Two famous
# resampling methods are the **independent bootstrap** and **the jackknife**.
#
# The jackknife is a special case of the independent bootstrap. Still, the jackknife was made
# popular prior to the independent bootstrap, and as the popularity of
# the independent bootstrap soared, new variants emerged, such as **the dependent bootstrap**.
#
# The Jackknife and independent bootstrap work for
# independent, identically distributed random variables.
# If these conditions are not
# satisfied, the methods will fail. Yet, it should be said that if the data are
# independent, identically distributed, and we only want to estimate the
# variance of $\overline{X}$ (which often is the case), then there is no
# need for bootstrapping.
#
#
# The Jackknife works by making many replicas of the estimator $\widehat{\beta}$.
# The jackknife is a resampling method where we systematically leave out one observation from the vector of observed values $\boldsymbol{x} = (x_1,x_2,\cdots,x_n)$.
# Let $\boldsymbol{x}_i$ denote the vector
# $$
# \boldsymbol{x}_i = (x_1,x_2,\cdots,x_{i-1},x_{i+1},\cdots,x_n),
# $$
# which equals the vector $\boldsymbol{x}$ with the exception that observation
# number $i$ is left out. Using this notation, define
# $\widehat{\beta}_i$ to be the estimator
# $\widehat{\beta}$ computed using $\boldsymbol{x}_i$.
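# (Added for completeness.) A commonly used jackknife estimate of the variance of
# $\widehat{\beta}$, built from these leave-one-out replicas, is
# $$
# \widehat{\mathrm{Var}}_{\mathrm{jack}}(\widehat{\beta})=\frac{n-1}{n}\sum_{i=1}^{n}\left(\widehat{\beta}_i-\overline{\widehat{\beta}}\right)^2,
# \qquad \overline{\widehat{\beta}}=\frac{1}{n}\sum_{i=1}^{n}\widehat{\beta}_i,
# $$
# and its square root gives the corresponding estimate of the standard error;
# the code below prints a closely related expression.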
# In[1]:
from numpy import *
from numpy.random import randint, randn
from time import time
def jackknife(data, stat):
n = len(data);t = zeros(n); inds = arange(n); t0 = time()
## 'jackknifing' by leaving out an observation for each i
for i in range(n):
t[i] = stat(delete(data,i) )
# analysis
print("Runtime: %g sec" % (time()-t0)); print("Jackknife Statistics :")
print("original bias std. error")
print("%8g %14g %15g" % (stat(data),(n-1)*mean(t)/n, (n*var(t))**.5))
return t
# Returns mean of data samples
def stat(data):
return mean(data)
mu, sigma = 100, 15
datapoints = 10000
x = mu + sigma*random.randn(datapoints)
# jackknife returns the vector of leave-one-out estimates
t = jackknife(x, stat)
# ### Bootstrap
#
# Bootstrapping is a nonparametric approach to statistical inference
# that substitutes computation for more traditional distributional
# assumptions and asymptotic results. Bootstrapping offers a number of
# advantages:
# 1. The bootstrap is quite general, although there are some cases in which it fails.
#
# 2. Because it does not require distributional assumptions (such as normally distributed errors), the bootstrap can provide more accurate inferences when the data are not well behaved or when the sample size is small.
#
# 3. It is possible to apply the bootstrap to statistics with sampling distributions that are difficult to derive, even asymptotically.
#
# 4. It is relatively simple to apply the bootstrap to complex data-collection plans (such as stratified and clustered samples).
#
# Since $\widehat{\beta} = \widehat{\beta}(\boldsymbol{X})$ is a function of random variables,
# $\widehat{\beta}$ itself must be a random variable. Thus it has
# a pdf, call this function $p(\boldsymbol{t})$. The aim of the bootstrap is to
# estimate $p(\boldsymbol{t})$ by the relative frequency of
# $\widehat{\beta}$. You can think of this as using a histogram
# in the place of $p(\boldsymbol{t})$. If the relative frequency closely
# resembles $p(\boldsymbol{t})$, then using numerics, it is straightforward to
# estimate all the interesting parameters of $p(\boldsymbol{t})$ using point
# estimators.
#
#
#
# In the case that $\widehat{\beta}$ has
# more than one component, and the components are independent, we use the
# same estimator on each component separately. If the probability
# density function of $X_i$, $p(x)$, had been known, then it would have
# been straightforward to do this by:
# 1. Drawing lots of numbers from $p(x)$, suppose we call one such set of numbers $(X_1^*, X_2^*, \cdots, X_n^*)$.
#
# 2. Then using these numbers, we could compute a replica of $\widehat{\beta}$ called $\widehat{\beta}^*$.
#
# By repeated use of (1) and (2), many
# estimates of $\widehat{\beta}$ could have been obtained. The
# idea is to use the relative frequency of $\widehat{\beta}^*$
# (think of a histogram) as an estimate of $p(\boldsymbol{t})$.
#
#
# But
# unless there is enough information available about the process that
# generated $X_1,X_2,\cdots,X_n$, $p(x)$ is in general
# unknown. Therefore, [Efron in 1979](https://projecteuclid.org/euclid.aos/1176344552) asked the
# question: What if we replace $p(x)$ by the relative frequency
# of the observation $X_i$; if we draw observations in accordance with
# the relative frequency of the observations, will we obtain the same
# result in some asymptotic sense? The answer is yes.
#
#
# Instead of generating the histogram for the relative
# frequency of the observation $X_i$, just draw the values
# $(X_1^*,X_2^*,\cdots,X_n^*)$ with replacement from the vector
# $\boldsymbol{X}$.
#
#
# The independent bootstrap works like this:
#
# 1. Draw with replacement $n$ numbers for the observed variables $\boldsymbol{x} = (x_1,x_2,\cdots,x_n)$.
#
# 2. Define a vector $\boldsymbol{x}^*$ containing the values which were drawn from $\boldsymbol{x}$.
#
# 3. Using the vector $\boldsymbol{x}^*$ compute $\widehat{\beta}^*$ by evaluating $\widehat \beta$ under the observations $\boldsymbol{x}^*$.
#
# 4. Repeat this process $k$ times.
#
# When you are done, you can draw a histogram of the relative frequency
# of $\widehat \beta^*$. This is your estimate of the probability
# distribution $p(t)$. Using this probability distribution you can
# estimate any statistics thereof. In principle you never draw the
# histogram of the relative frequency of $\widehat{\beta}^*$. Instead
# you use the estimators corresponding to the statistic of interest. For
# example, if you are interested in estimating the variance of $\widehat
# \beta$, apply the estimator $\widehat \sigma^2$ to the values
# $\widehat \beta^*$.
#
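# As a compact illustration of steps 1-4 above (our own sketch, equivalent to
# the integer-index resampling used in the bootstrap code further below), the
# lines here estimate the standard error of the sample mean by drawing with
# replacement via **numpy**'s `choice`.
import numpy as np
rng = np.random.default_rng(7)
x_obs = rng.normal(loc=100, scale=15, size=1000)   # made-up observations
k_boot = 2000
boot_means = np.empty(k_boot)
for b in range(k_boot):
    # steps 1-2: draw n values with replacement and store them in x_star
    x_star = rng.choice(x_obs, size=x_obs.size, replace=True)
    # step 3: evaluate the estimator (here the sample mean) on the resampled data
    boot_means[b] = x_star.mean()
# step 4 has been repeated k_boot times; the spread of the replicas estimates the error
print("sample mean             :", x_obs.mean())
print("bootstrap std. error    :", boot_means.std())
print("analytical sigma/sqrt(n):", x_obs.std()/np.sqrt(x_obs.size))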
# Before we proceed however, we need to remind ourselves about a central
# theorem in statistics, namely the so-called **central limit theorem**.
# This theorem plays a central role in understanding why the Bootstrap
# (and other resampling methods) work so well on independent and
# identically distributed variables.
#
#
# Suppose we have a PDF $p(x)$ from which we generate a series $N$
# of averages $\langle x_i \rangle$. Each mean value $\langle x_i \rangle$
# is viewed as the average of a specific measurement, e.g., throwing
# dice 100 times and then taking the average value, or producing a certain
# amount of random numbers.
# For notational ease, we set $\langle x_i \rangle=x_i$ in the discussion
# which follows.
#
# If we compute the mean $z$ of $m$ such mean values $x_i$
# $$
# z=\frac{x_1+x_2+\dots+x_m}{m},
# $$
# the question we pose is which is the PDF of the new variable $z$.
#
#
# The probability of obtaining an average value $z$ is the product of the
# probabilities of obtaining arbitrary individual mean values $x_i$,
# but with the constraint that the average is $z$. We can express this through
# the following expression
# $$
# \tilde{p}(z)=\int dx_1p(x_1)\int dx_2p(x_2)\dots\int dx_mp(x_m)
# \delta(z-\frac{x_1+x_2+\dots+x_m}{m}),
# $$
# where the $\delta$-function embodies the constraint that the mean is $z$.
# All measurements that lead to each individual $x_i$ are expected to
# be independent, which in turn means that we can express $\tilde{p}$ as the
# product of individual $p(x_i)$. The independence assumption is important in the derivation of the central limit theorem.
#
#
#
# If we use the integral expression for the $\delta$-function
# $$
# \delta(z-\frac{x_1+x_2+\dots+x_m}{m})=\frac{1}{2\pi}\int_{-\infty}^{\infty}
# dq\exp{\left(iq(z-\frac{x_1+x_2+\dots+x_m}{m})\right)},
# $$
# and inserting $e^{i\mu q-i\mu q}$ where $\mu$ is the mean value
# we arrive at
# $$
# \tilde{p}(z)=\frac{1}{2\pi}\int_{-\infty}^{\infty}
# dq\exp{\left(iq(z-\mu)\right)}\left[\int_{-\infty}^{\infty}
# dxp(x)\exp{\left(iq(\mu-x)/m\right)}\right]^m,
# $$
# with the integral over $x$ resulting in
# $$
# \int_{-\infty}^{\infty}dxp(x)\exp{\left(iq(\mu-x)/m\right)}=
# \int_{-\infty}^{\infty}dxp(x)
# \left[1+\frac{iq(\mu-x)}{m}-\frac{q^2(\mu-x)^2}{2m^2}+\dots\right].
# $$
# The second term on the rhs disappears since this is just the mean and
# employing the definition of $\sigma^2$ we have
# $$
# \int_{-\infty}^{\infty}dxp(x)e^{\left(iq(\mu-x)/m\right)}=
# 1-\frac{q^2\sigma^2}{2m^2}+\dots,
# $$
# resulting in
# $$
# \left[\int_{-\infty}^{\infty}dxp(x)\exp{\left(iq(\mu-x)/m\right)}\right]^m\approx
# \left[1-\frac{q^2\sigma^2}{2m^2}+\dots \right]^m,
# $$
# and in the limit $m\rightarrow \infty$ we obtain
# $$
# \tilde{p}(z)=\frac{1}{\sqrt{2\pi}(\sigma/\sqrt{m})}
# \exp{\left(-\frac{(z-\mu)^2}{2(\sigma/\sqrt{m})^2}\right)},
# $$
# which is the normal distribution with variance
# $\sigma^2_m=\sigma^2/m$, where $\sigma$ is the variance of the PDF $p(x)$
# and $\mu$ is also the mean of the PDF $p(x)$.
#
#
# Thus, the central limit theorem states that the PDF $\tilde{p}(z)$ of
# the average of $m$ random values corresponding to a PDF $p(x)$
# is a normal distribution whose mean is the
# mean value of the PDF $p(x)$ and whose variance is the variance
# of the PDF $p(x)$ divided by $m$, the number of values used to compute $z$.
#
# The central limit theorem leads to the well-known expression for the
# standard deviation, given by
# $$
# \sigma_m=
# \frac{\sigma}{\sqrt{m}}.
# $$
# The latter is true only if the average value is known exactly. This is obtained in the limit
# $m\rightarrow \infty$ only. Because the mean and the variance are measured quantities we obtain
# the familiar expression in statistics
# $$
# \sigma_m\approx
# \frac{\sigma}{\sqrt{m-1}}.
# $$
# In many cases however the above estimate for the standard deviation,
# in particular if correlations are strong, may be too simplistic. Keep
# in mind that we have assumed that the variables $x$ are independent
# and identically distributed. This is obviously not always the
# case. For example, the random numbers (or better pseudorandom numbers)
# we generate in various calculations do always exhibit some
# correlations.
#
#
#
# The theorem is satisfied by a large class of PDFs. Note however that for a
# finite $m$, it is not always possible to find a closed-form/analytic expression for
# $\tilde{p}(z)$.
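# Before turning to the bootstrap code, here is a small numerical illustration
# (our own addition) of the relation $\sigma_m\approx\sigma/\sqrt{m}$: we draw
# many averages of $m$ uniform random numbers (a decidedly non-Gaussian parent
# PDF) and compare the spread of these averages with $\sigma/\sqrt{m}$.
import numpy as np
rng = np.random.default_rng(2024)
sigma_parent = np.sqrt(1.0/12.0)   # standard deviation of the uniform PDF on [0,1]
for m in (2, 10, 100, 1000):
    # 10000 experiments, each producing the average z of m uniform numbers
    z = rng.uniform(size=(10000, m)).mean(axis=1)
    print(f"m = {m:5d}: std of averages = {z.std():.5f}, sigma/sqrt(m) = {sigma_parent/np.sqrt(m):.5f}")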
#
#
# The following code starts with a Gaussian distribution with mean value
# $\mu =100$ and variance $\sigma=15$. We use this to generate the data
# used in the bootstrap analysis. The bootstrap analysis returns a data
# set after a given number of bootstrap operations (as many as we have
# data points). This data set consists of estimated mean values for each
# bootstrap operation. The histogram generated by the bootstrap method
# shows that the distribution for these mean values is also a Gaussian,
# centered around the mean value $\mu=100$ but with standard deviation
# $\sigma/\sqrt{n}$, where $n$ is the number of original data points (in
# this case also equal to the number of bootstrap samples). The value
# of the standard deviation is what we expect from the central limit
# theorem.
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
from time import time
from scipy.stats import norm
import matplotlib.pyplot as plt
# Returns mean of bootstrap samples
# Bootstrap algorithm
def bootstrap(data, datapoints):
t = np.zeros(datapoints)
n = len(data)
# non-parametric bootstrap
for i in range(datapoints):
t[i] = np.mean(data[np.random.randint(0,n,n)])
# analysis
print("Bootstrap Statistics :")
print("original bias std. error")
print("%8g %8g %14g %15g" % (np.mean(data), np.std(data),np.mean(t),np.std(t)))
return t
# We set the mean value to 100 and the standard deviation to 15
mu, sigma = 100, 15
datapoints = 10000
# We generate random numbers according to the normal distribution
x = mu + sigma*np.random.randn(datapoints)
# bootstrap returns the vector of bootstrapped mean values
t = bootstrap(x, datapoints)
# We see that our new variance and from that the standard deviation, agrees with the central limit theorem.
#
# We plot then the histogram together with a best fit for the data set.
# In[3]:
# the histogram of the bootstrapped data (normalized data if density = True)
n, binsboot, patches = plt.hist(t, 50, density=True, facecolor='red', alpha=0.75)
# add a 'best fit' line
y = norm.pdf(binsboot, np.mean(t), np.std(t))
lt = plt.plot(binsboot, y, 'b', linewidth=1)
plt.xlabel('x')
plt.ylabel('Probability')
plt.grid(True)
plt.show()
# ## The bias-variance tradeoff
#
#
# We will discuss the bias-variance tradeoff in the context of
# continuous predictions such as regression. However, many of the
# intuitions and ideas discussed here also carry over to classification
# tasks. Consider a dataset $\mathcal{L}$ consisting of the data
# $\mathbf{X}_\mathcal{L}=\{(y_j, \boldsymbol{x}_j), j=0\ldots n-1\}$.
#
# Let us assume that the true data is generated from a noisy model
# $$
# \boldsymbol{y}=f(\boldsymbol{x}) + \boldsymbol{\epsilon}
# $$
# where $\epsilon$ is normally distributed with mean zero and variance $\sigma^2$.
#
# In our derivation of the ordinary least squares method we defined then
# an approximation to the function $f$ in terms of the parameters
# $\boldsymbol{\beta}$ and the design matrix $\boldsymbol{X}$ which embody our model,
# that is $\boldsymbol{\tilde{y}}=\boldsymbol{X}\boldsymbol{\beta}$.
#
# Thereafter we found the parameters $\boldsymbol{\beta}$ by optimizing the mean squared error via the so-called cost function
# $$
# C(\boldsymbol{X},\boldsymbol{\beta}) =\frac{1}{n}\sum_{i=0}^{n-1}(y_i-\tilde{y}_i)^2=\mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right].
# $$
# We can rewrite this as
# $$
# \mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right]=\frac{1}{n}\sum_i(f_i-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right])^2+\frac{1}{n}\sum_i(\tilde{y}_i-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right])^2+\sigma^2.
# $$
# The first term represents the square of the bias of the learning
# method, which can be thought of as the error caused by the simplifying
# assumptions built into the method. The second term represents the
# variance of the chosen model and finally the last term is the variance of
# the error $\boldsymbol{\epsilon}$.
#
# To derive this equation, we need to recall that the variances of $\boldsymbol{y}$ and $\boldsymbol{\epsilon}$ are both equal to $\sigma^2$. The mean value of $\boldsymbol{\epsilon}$ is by definition equal to zero. Furthermore, the function $f$ is not a stochastic variable, whereas $\boldsymbol{\tilde{y}}$ is stochastic through its dependence on the randomly drawn training data.
# We use a more compact notation in terms of the expectation value
# $$
# \mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right]=\mathbb{E}\left[(\boldsymbol{f}+\boldsymbol{\epsilon}-\boldsymbol{\tilde{y}})^2\right],
# $$
# and adding and subtracting $\mathbb{E}\left[\boldsymbol{\tilde{y}}\right]$ we get
# $$
# \mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right]=\mathbb{E}\left[(\boldsymbol{f}+\boldsymbol{\epsilon}-\boldsymbol{\tilde{y}}+\mathbb{E}\left[\boldsymbol{\tilde{y}}\right]-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right])^2\right],
# $$
# which, using the abovementioned expectation values can be rewritten as
# $$
# \mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right]=\mathbb{E}\left[(\boldsymbol{y}-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right])^2\right]+\mathrm{Var}\left[\boldsymbol{\tilde{y}}\right]+\sigma^2,
# $$
# that is the rewriting in terms of the so-called bias, the variance of the model $\boldsymbol{\tilde{y}}$ and the variance of $\boldsymbol{\epsilon}$.
# In[4]:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.utils import resample
np.random.seed(2018)
n = 500
n_boostraps = 100
degree = 18 # A quite high value, just to show.
noise = 0.1
# Make data set.
x = np.linspace(-1, 3, n).reshape(-1, 1)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2) + np.random.normal(0, 0.1, x.shape)
# Hold out some test data that is never used in training.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Combine x transformation and model into one operation.
# Not neccesary, but convenient.
model = make_pipeline(PolynomialFeatures(degree=degree), LinearRegression(fit_intercept=False))
# The following (m x n_bootstraps) matrix holds the column vectors y_pred
# for each bootstrap iteration.
y_pred = np.empty((y_test.shape[0], n_boostraps))
for i in range(n_boostraps):
x_, y_ = resample(x_train, y_train)
# Evaluate the new model on the same test data each time.
y_pred[:, i] = model.fit(x_, y_).predict(x_test).ravel()
# Note: Expectations and variances taken w.r.t. different training
# data sets, hence the axis=1. Subsequent means are taken across the test data
# set in order to obtain a total value, but before this we have error/bias/variance
# calculated per data point in the test set.
# Note 2: The use of keepdims=True is important in the calculation of bias as this
# maintains the column vector form. Dropping this yields very unexpected results.
error = np.mean( np.mean((y_test - y_pred)**2, axis=1, keepdims=True) )
bias = np.mean( (y_test - np.mean(y_pred, axis=1, keepdims=True))**2 )
variance = np.mean( np.var(y_pred, axis=1, keepdims=True) )
print('Error:', error)
print('Bias^2:', bias)
print('Var:', variance)
print('{} >= {} + {} = {}'.format(error, bias, variance, bias+variance))
plt.plot(x[::5, :], y[::5, :], label='f(x)')
plt.scatter(x_test, y_test, label='Data points')
plt.scatter(x_test, np.mean(y_pred, axis=1), label='Pred')
plt.legend()
plt.show()
# In[5]:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.utils import resample
np.random.seed(2018)
n = 40
n_boostraps = 100
maxdegree = 14
# Make data set.
x = np.linspace(-3, 3, n).reshape(-1, 1)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2)+ np.random.normal(0, 0.1, x.shape)
error = np.zeros(maxdegree)
bias = np.zeros(maxdegree)
variance = np.zeros(maxdegree)
polydegree = np.zeros(maxdegree)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
for degree in range(maxdegree):
model = make_pipeline(PolynomialFeatures(degree=degree), LinearRegression(fit_intercept=False))
y_pred = np.empty((y_test.shape[0], n_boostraps))
for i in range(n_boostraps):
x_, y_ = resample(x_train, y_train)
y_pred[:, i] = model.fit(x_, y_).predict(x_test).ravel()
polydegree[degree] = degree
error[degree] = np.mean( np.mean((y_test - y_pred)**2, axis=1, keepdims=True) )
bias[degree] = np.mean( (y_test - np.mean(y_pred, axis=1, keepdims=True))**2 )
variance[degree] = np.mean( np.var(y_pred, axis=1, keepdims=True) )
print('Polynomial degree:', degree)
print('Error:', error[degree])
print('Bias^2:', bias[degree])
print('Var:', variance[degree])
print('{} >= {} + {} = {}'.format(error[degree], bias[degree], variance[degree], bias[degree]+variance[degree]))
plt.plot(polydegree, error, label='Error')
plt.plot(polydegree, bias, label='bias')
plt.plot(polydegree, variance, label='Variance')
plt.legend()
plt.show()
# The bias-variance tradeoff summarizes the fundamental tension in
# machine learning, particularly supervised learning, between the
# complexity of a model and the amount of training data needed to train
# it. Since data is often limited, in practice it is often useful to
# use a less-complex model with higher bias, that is a model whose asymptotic
# performance is worse than another model because it is easier to
# train and less sensitive to sampling noise arising from having a
# finite-sized training dataset (smaller variance).
#
#
#
# The above equations tell us that in
# order to minimize the expected test error, we need to select a
# statistical learning method that simultaneously achieves low variance
# and low bias. Note that variance is inherently a nonnegative quantity,
# and squared bias is also nonnegative. Hence, we see that the expected
# test MSE can never lie below $Var(\epsilon)$, the irreducible error.
#
#
# What do we mean by the variance and bias of a statistical learning
# method? The variance refers to the amount by which our model would change if we
# estimated it using a different training data set. Since the training
# data are used to fit the statistical learning method, different
# training data sets will result in a different estimate. But ideally the
# estimate for our model should not vary too much between training
# sets. However, if a method has high variance then small changes in
# the training data can result in large changes in the model. In general, more
# flexible statistical methods have higher variance.
#
#
# You may also find this recent [article](https://www.pnas.org/content/116/32/15849) of interest.
# In[6]:
"""
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
def true_fun(X):
return np.cos(1.5 * np.pi * X)
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
# In[7]:
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
infile = open(data_path("EoS.csv"),'r')
# Read the EoS data as csv file and organize the data into two arrays with density and energies
EoS = pd.read_csv(infile, names=('Density', 'Energy'))
EoS['Energy'] = pd.to_numeric(EoS['Energy'], errors='coerce')
EoS = EoS.dropna()
Energies = EoS['Energy']
Density = EoS['Density']
# The design matrix now as function of various polytrops
Maxpolydegree = 30
X = np.zeros((len(Density),Maxpolydegree))
X[:,0] = 1.0
testerror = np.zeros(Maxpolydegree)
trainingerror = np.zeros(Maxpolydegree)
polynomial = np.zeros(Maxpolydegree)
trials = 100
for polydegree in range(1, Maxpolydegree):
polynomial[polydegree] = polydegree
for degree in range(polydegree):
X[:,degree] = Density**(degree/3.0)
# loop over trials in order to estimate the expectation value of the MSE
testerror[polydegree] = 0.0
trainingerror[polydegree] = 0.0
for samples in range(trials):
x_train, x_test, y_train, y_test = train_test_split(X, Energies, test_size=0.2)
model = LinearRegression(fit_intercept=False).fit(x_train, y_train)
ypred = model.predict(x_train)
ytilde = model.predict(x_test)
testerror[polydegree] += mean_squared_error(y_test, ytilde)
trainingerror[polydegree] += mean_squared_error(y_train, ypred)
testerror[polydegree] /= trials
trainingerror[polydegree] /= trials
print("Degree of polynomial: %3d"% polynomial[polydegree])
print("Mean squared error on training data: %.8f" % trainingerror[polydegree])
print("Mean squared error on test data: %.8f" % testerror[polydegree])
plt.plot(polynomial, np.log10(trainingerror), label='Training Error')
plt.plot(polynomial, np.log10(testerror), label='Test Error')
plt.xlabel('Polynomial degree')
plt.ylabel('log10[MSE]')
plt.legend()
plt.show()
# ## Cross-validation
#
# When the repetitive splitting of the data set is done randomly,
# samples may accidentally end up in a vast majority of the splits in
# either the training or the test set. Such samples may have an unbalanced
# influence on either model building or prediction evaluation. To avoid
# this $k$-fold cross-validation structures the data splitting. The
# samples are divided into $k$ more or less equally sized exhaustive and
# mutually exclusive subsets. In turn (at each split) one of these
# subsets plays the role of the test set while the union of the
# remaining subsets constitutes the training set. Such a splitting
# warrants a balanced representation of each sample in both training and
# test set over the splits. Still the division into the $k$ subsets
# involves a degree of randomness. This may be fully excluded when
# choosing $k=n$. This particular case is referred to as leave-one-out
# cross-validation (LOOCV).
#
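# To make the splitting structure concrete, the tiny sketch below (our own
# addition) prints how **Scikit-Learn**'s `KFold` partitions ten sample indices
# into $k=5$ mutually exclusive test folds; the same class is used in the Ridge
# example later in this section.
import numpy as np
from sklearn.model_selection import KFold
kfold_demo = KFold(n_splits=5, shuffle=True, random_state=0)
for fold, (train_inds, test_inds) in enumerate(kfold_demo.split(np.arange(10))):
    print(f"fold {fold}: train indices {train_inds}, test indices {test_inds}")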
#
# * Define a range of interest for the penalty parameter.
#
# * Divide the data set into training and test set comprising samples $\{1, \ldots, n\} \setminus i$ and $\{ i \}$, respectively.
#
# * Fit the linear regression model by means of ridge estimation for each $\lambda$ in the grid using the training set, and the corresponding estimate of the error variance $\boldsymbol{\sigma}_{-i}^2(\lambda)$, as
# $$
# \begin{align*}
# \boldsymbol{\beta}_{-i}(\lambda) & = ( \boldsymbol{X}_{-i, \ast}^{T}
# \boldsymbol{X}_{-i, \ast} + \lambda \boldsymbol{I}_{pp})^{-1}
# \boldsymbol{X}_{-i, \ast}^{T} \boldsymbol{y}_{-i}
# \end{align*}
# $$
# * Evaluate the prediction performance of these models on the test set by $\log\{L[y_i, \boldsymbol{X}_{i, \ast}; \boldsymbol{\beta}_{-i}(\lambda), \boldsymbol{\sigma}_{-i}^2(\lambda)]\}$. Or, by the prediction error $|y_i - \boldsymbol{X}_{i, \ast} \boldsymbol{\beta}_{-i}(\lambda)|$, the relative error, the error squared or the R2 score function.
#
# * Repeat the first three steps such that each sample plays the role of the test set once.
#
# * Average the prediction performances of the test sets at each grid point of the penalty parameter. It is an estimate of the prediction performance of the model corresponding to this value of the penalty parameter on novel data (a minimal code sketch of this leave-one-out procedure is given right after this list). It is defined as
# $$
# \begin{align*}
# \frac{1}{n} \sum_{i = 1}^n \log\{L[y_i, \mathbf{X}_{i, \ast}; \boldsymbol{\beta}_{-i}(\lambda), \boldsymbol{\sigma}_{-i}^2(\lambda)]\}.
# \end{align*}
# $$
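# A minimal sketch of this leave-one-out procedure (our own addition, using a
# made-up one-dimensional data set and the squared prediction error instead of
# the log-likelihood) can be written with **Scikit-Learn**'s `LeaveOneOut`
# splitter as follows.
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import LeaveOneOut, cross_val_score
from sklearn.preprocessing import PolynomialFeatures
rng = np.random.default_rng(11)
x_demo = rng.uniform(size=30)
y_demo = 3*x_demo**2 + 0.1*rng.standard_normal(30)
X_demo = PolynomialFeatures(degree=4).fit_transform(x_demo[:, np.newaxis])
loo = LeaveOneOut()
for lam in np.logspace(-4, 2, 7):
    # negative MSE for each left-out sample; the average is the LOOCV estimate for this lambda
    scores = cross_val_score(Ridge(alpha=lam), X_demo, y_demo,
                             scoring='neg_mean_squared_error', cv=loo)
    print(f"lambda = {lam:9.4f}: LOOCV MSE = {-scores.mean():.5f}")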
# For the various values of $k$
#
# 1. shuffle the dataset randomly.
#
# 2. Split the dataset into $k$ groups.
#
# 3. For each unique group:
#
# a. Decide which group to use as set for test data
#
# b. Take the remaining groups as a training data set
#
# c. Fit a model on the training set and evaluate it on the test set
#
# d. Retain the evaluation score and discard the model
#
#
# 4. Summarize the model using the sample of model evaluation scores
#
# The code here uses Ridge regression with cross-validation (CV) resampling and $k$-fold CV in order to fit a specific polynomial.
# In[8]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import PolynomialFeatures
# A seed just to ensure that the random numbers are the same for every run.
# Useful for eventual debugging.
np.random.seed(3155)
# Generate the data.
nsamples = 100
x = np.random.randn(nsamples)
y = 3*x**2 + np.random.randn(nsamples)
## Cross-validation on Ridge regression using KFold only
# Decide degree on polynomial to fit
poly = PolynomialFeatures(degree = 6)
# Decide which values of lambda to use
nlambdas = 500
lambdas = np.logspace(-3, 5, nlambdas)
# Initialize a KFold instance
k = 5
kfold = KFold(n_splits = k)
# Perform the cross-validation to estimate MSE
scores_KFold = np.zeros((nlambdas, k))
i = 0
for lmb in lambdas:
ridge = Ridge(alpha = lmb)
j = 0
for train_inds, test_inds in kfold.split(x):
xtrain = x[train_inds]
ytrain = y[train_inds]
xtest = x[test_inds]
ytest = y[test_inds]
Xtrain = poly.fit_transform(xtrain[:, np.newaxis])
ridge.fit(Xtrain, ytrain[:, np.newaxis])
Xtest = poly.fit_transform(xtest[:, np.newaxis])
ypred = ridge.predict(Xtest)
scores_KFold[i,j] = np.sum((ypred - ytest[:, np.newaxis])**2)/np.size(ypred)
j += 1
i += 1
estimated_mse_KFold = np.mean(scores_KFold, axis = 1)
## Cross-validation using cross_val_score from sklearn along with KFold
# kfold is an instance initialized above as:
# kfold = KFold(n_splits = k)
estimated_mse_sklearn = np.zeros(nlambdas)
i = 0
for lmb in lambdas:
ridge = Ridge(alpha = lmb)
X = poly.fit_transform(x[:, np.newaxis])
estimated_mse_folds = cross_val_score(ridge, X, y[:, np.newaxis], scoring='neg_mean_squared_error', cv=kfold)
# cross_val_score returns an array containing the estimated negative MSE for every fold.
# We have to take the mean of this array in order to get an estimate of the MSE of the model.
estimated_mse_sklearn[i] = np.mean(-estimated_mse_folds)
i += 1
## Plot and compare the slightly different ways to perform cross-validation
plt.figure()
plt.plot(np.log10(lambdas), estimated_mse_sklearn, label = 'cross_val_score')
plt.plot(np.log10(lambdas), estimated_mse_KFold, 'r--', label = 'KFold')
plt.xlabel('log10(lambda)')
plt.ylabel('mse')
plt.legend()
plt.show()
# More examples of the application of cross-validation follow here.
# In[9]:
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
infile = open(data_path("EoS.csv"),'r')
# Read the EoS data as csv file and organize the data into two arrays with density and energies
EoS = pd.read_csv(infile, names=('Density', 'Energy'))
EoS['Energy'] = pd.to_numeric(EoS['Energy'], errors='coerce')
EoS = EoS.dropna()
Energies = EoS['Energy']
Density = EoS['Density']
# The design matrix now as function of various polytrops
Maxpolydegree = 30
X = np.zeros((len(Density),Maxpolydegree))
X[:,0] = 1.0
estimated_mse_sklearn = np.zeros(Maxpolydegree)
polynomial = np.zeros(Maxpolydegree)
k =5
kfold = KFold(n_splits = k)
for polydegree in range(1, Maxpolydegree):
polynomial[polydegree] = polydegree
for degree in range(polydegree):
X[:,degree] = Density**(degree/3.0)
OLS = LinearRegression(fit_intercept=False)
# loop over trials in order to estimate the expectation value of the MSE
estimated_mse_folds = cross_val_score(OLS, X, Energies, scoring='neg_mean_squared_error', cv=kfold)
#[:, np.newaxis]
estimated_mse_sklearn[polydegree] = np.mean(-estimated_mse_folds)
plt.plot(polynomial, np.log10(estimated_mse_sklearn), label='Test Error')
plt.xlabel('Polynomial degree')
plt.ylabel('log10[MSE]')
plt.legend()
plt.show()
# Note that we have kept the intercept in the first column of the design matrix $\boldsymbol{X}$. When we call the corresponding **Scikit-Learn** function we thus need to set **fit_intercept** to **False**. Libraries like **Scikit-Learn** normally scale the design matrix and do not fit the intercept explicitly. See the discussions below.
#
# ## More on Rescaling data
#
# We end this chapter by adding some words on scaling and how to deal with the intercept for regression cases.
#
# When you are comparing your own code with for example **Scikit-Learn**'s
# library, there are some technicalities to keep in mind. The examples
# here demonstrate some of these aspects with potential pitfalls.
#
# The discussion here focuses on the role of the intercept, how we can
# set up the design matrix, what scaling we should use and other topics
# which tend to confuse us.
#
# The intercept can be interpreted as the expected value of our
# target/output variables when all other predictors are set to zero.
# Thus, if we cannot assume that the expected outputs/targets are zero
# when all predictors are zero (the columns in the design matrix), it
# may be a bad idea to implement a model which penalizes the intercept.
# Furthermore, in for example Ridge and Lasso regression, the default solutions
# from the library **Scikit-Learn** (when not shrinking $\beta_0$) for the unknown parameters
# $\boldsymbol{\beta}$, are derived under the assumption that both $\boldsymbol{y}$ and
# $\boldsymbol{X}$ are zero centered, that is we subtract the mean values.
#
#
# If our predictors represent different scales, then it is important to
# standardize the design matrix $\boldsymbol{X}$ by subtracting the mean of each
# column from the corresponding column and dividing the column by its
# standard deviation. Most machine learning libraries do this as a default. This means that if you compare your code with the results from a given library,
# the results may differ.
#
# The
# [Standardscaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# function in **Scikit-Learn** does this for us. For the data sets we
# have been studying in our various examples, the data are in many cases
# already scaled and there is no need to scale them. You as a user of different machine learning algorithms, should always perform a
# survey of your data, with a critical assessment of them in case you need to scale the data.
#
# If you need to scale the data, not doing so will give an *unfair*
# penalization of the parameters since their magnitude depends on the
# scale of their corresponding predictor.
#
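# As an illustration (our own sketch with made-up data on very different
# scales), the lines below standardize a design matrix with `StandardScaler`;
# note that the scaler is fitted on the training data only and the *same*
# transformation is then applied to the test data.
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
rng = np.random.default_rng(5)
# two features on very different scales (say metres and millimetres)
X_demo = np.column_stack((rng.normal(1.8, 0.1, 100), rng.normal(1800.0, 100.0, 100)))
y_demo = X_demo @ np.array([0.5, 0.0005]) + 0.01*rng.standard_normal(100)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2, random_state=5)
scaler = StandardScaler().fit(X_tr)    # learn mean and standard deviation from the training data only
X_tr_scaled = scaler.transform(X_tr)
X_te_scaled = scaler.transform(X_te)   # reuse the training statistics on the test data
print("column means after scaling (train):", X_tr_scaled.mean(axis=0).round(3))
print("column stds  after scaling (train):", X_tr_scaled.std(axis=0).round(3))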
# Suppose as an example that
# you have an input variable given by the heights of different persons.
# Human height might be measured in inches or meters or
# kilometers. If measured in kilometers, a standard linear regression
# model with this predictor would probably give a much bigger
# coefficient term, than if measured in millimeters.
# This can clearly lead to problems in evaluating the cost/loss functions.
#
#
#
# Keep in mind that when you transform your data set before training a model, the same transformation needs to be done
# on any new data set before making a prediction. If we translate this into Python code, it could be implemented as follows
# In[10]:
"""
#Model training, we compute the mean value of y and X
y_train_mean = np.mean(y_train)
X_train_mean = np.mean(X_train,axis=0)
X_train = X_train - X_train_mean
y_train = y_train - y_train_mean
# Then we fit our model with the training data
trained_model = some_model.fit(X_train,y_train)
#Model prediction, we need also to transform our data set used for the prediction.
X_test = X_test - X_train_mean #Use mean from training data
y_pred = trained_model.predict(X_test)
y_pred = y_pred + y_train_mean
"""
# Let us try to understand what this may imply mathematically when we
# subtract the mean values, also known as *zero centering*. For
# simplicity, we will focus on ordinary regression, as done in the above example.
#
# The cost/loss function for regression is
# $$
# C(\beta_0, \beta_1, ... , \beta_{p-1}) = \frac{1}{n}\sum_{i=0}^{n-1} \left(y_i - \beta_0 - \sum_{j=1}^{p-1} X_{ij}\beta_j\right)^2.
# $$
# Recall also that we use the squared value. This expression can lead to an
# increased penalty for higher differences between predicted and
# output/target values.
#
# What we have done is to single out the $\beta_0$ term in the
# definition of the mean squared error (MSE). The design matrix $X$
# does in this case not contain any intercept column. When we take the
# derivative with respect to $\beta_0$, we want the derivative to obey
# $$
# \frac{\partial C}{\partial \beta_j} = 0,
# $$
# for all $j$. For $\beta_0$ we have
# $$
# \frac{\partial C}{\partial \beta_0} = -\frac{2}{n}\sum_{i=0}^{n-1} \left(y_i - \beta_0 - \sum_{j=1}^{p-1} X_{ij} \beta_j\right).
# $$
# Multiplying away the constant $2/n$, we obtain
# $$
# \sum_{i=0}^{n-1} \beta_0 = \sum_{i=0}^{n-1}y_i - \sum_{i=0}^{n-1} \sum_{j=1}^{p-1} X_{ij} \beta_j.
# $$
# Let us specialize first to the case where we have only two parameters $\beta_0$ and $\beta_1$.
# Our result for $\beta_0$ simplifies then to
# $$
# n\beta_0 = \sum_{i=0}^{n-1}y_i - \sum_{i=0}^{n-1} X_{i1} \beta_1.
# $$
# We obtain then
# $$
# \beta_0 = \frac{1}{n}\sum_{i=0}^{n-1}y_i - \beta_1\frac{1}{n}\sum_{i=0}^{n-1} X_{i1}.
# $$
# If we define
# $$
# \mu_{\boldsymbol{x}_1}=\frac{1}{n}\sum_{i=0}^{n-1} X_{i1},
# $$
# and the mean value of the outputs as
# $$
# \mu_y=\frac{1}{n}\sum_{i=0}^{n-1}y_i,
# $$
# we have
# $$
# \beta_0 = \mu_y - \beta_1\mu_{\boldsymbol{x}_1}.
# $$
# In the general case with more parameters than $\beta_0$ and $\beta_1$, we have
# $$
# \beta_0 = \frac{1}{n}\sum_{i=0}^{n-1}y_i - \frac{1}{n}\sum_{i=0}^{n-1}\sum_{j=1}^{p-1} X_{ij}\beta_j.
# $$
# We can rewrite the latter equation as
# $$
# \beta_0 = \frac{1}{n}\sum_{i=0}^{n-1}y_i - \sum_{j=1}^{p-1} \mu_{\boldsymbol{x}_j}\beta_j,
# $$
# where we have defined
# $$
# \mu_{\boldsymbol{x}_j}=\frac{1}{n}\sum_{i=0}^{n-1} X_{ij},
# $$
# the mean value for all elements of the column vector $\boldsymbol{x}_j$.
#
#
#
# Replacing $y_i$ with $y_i - \overline{\boldsymbol{y}}$ and centering also our design matrix results in a cost function (in vector-matrix disguise)
# $$
# C(\boldsymbol{\beta}) = (\boldsymbol{\tilde{y}} - \tilde{X}\boldsymbol{\beta})^T(\boldsymbol{\tilde{y}} - \tilde{X}\boldsymbol{\beta}).
# $$
# If we minimize with respect to $\boldsymbol{\beta}$ we have then
# $$
# \hat{\boldsymbol{\beta}} = (\tilde{X}^T\tilde{X})^{-1}\tilde{X}^T\boldsymbol{\tilde{y}},
# $$
# where $\boldsymbol{\tilde{y}} = \boldsymbol{y} - \overline{\boldsymbol{y}}$
# and $\tilde{X}_{ij} = X_{ij} - \frac{1}{n}\sum_{k=0}^{n-1}X_{kj}$.
#
# For Ridge regression we need to add $\lambda \boldsymbol{\beta}^T\boldsymbol{\beta}$ to the cost function and get then
# $$
# \hat{\boldsymbol{\beta}} = (\tilde{X}^T\tilde{X} + \lambda I)^{-1}\tilde{X}^T\boldsymbol{\tilde{y}}.
# $$
# What does this mean? And why do we insist on all this? Let us look at some examples.
#
#
# This code shows a simple polynomial fit to a data set using the above transformed data, where we consider the role of the intercept first, by either excluding it or including it (*code example thanks to Øyvind Sigmundson Schøyen*). Here our scaling of the data is done by subtracting the mean values only.
# Note also that we do not split the data into training and test.
# In[11]:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
np.random.seed(2021)
def MSE(y_data,y_model):
n = np.size(y_model)
return np.sum((y_data-y_model)**2)/n
def fit_beta(X, y):
return np.linalg.pinv(X.T @ X) @ X.T @ y
true_beta = [2, 0.5, 3.7]
x = np.linspace(0, 1, 11)
y = np.sum(
np.asarray([x ** p * b for p, b in enumerate(true_beta)]), axis=0
) + 0.1 * np.random.normal(size=len(x))
degree = 3
X = np.zeros((len(x), degree))
# Include the intercept in the design matrix
for p in range(degree):
X[:, p] = x ** p
beta = fit_beta(X, y)
# Intercept is included in the design matrix
skl = LinearRegression(fit_intercept=False).fit(X, y)
print(f"True beta: {true_beta}")
print(f"Fitted beta: {beta}")
print(f"Sklearn fitted beta: {skl.coef_}")
ypredictOwn = X @ beta
ypredictSKL = skl.predict(X)
print(f"MSE with intercept column")
print(MSE(y,ypredictOwn))
print(f"MSE with intercept column from SKL")
print(MSE(y,ypredictSKL))
plt.figure()
plt.scatter(x, y, label="Data")
plt.plot(x, X @ beta, label="Fit")
plt.plot(x, skl.predict(X), label="Sklearn (fit_intercept=False)")
# Do not include the intercept in the design matrix
X = np.zeros((len(x), degree - 1))
for p in range(degree - 1):
X[:, p] = x ** (p + 1)
# Intercept is not included in the design matrix
skl = LinearRegression(fit_intercept=True).fit(X, y)
# Use centered values for X and y when computing coefficients
y_offset = np.average(y, axis=0)
X_offset = np.average(X, axis=0)
beta = fit_beta(X - X_offset, y - y_offset)
intercept = np.mean(y_offset - X_offset @ beta)
print(f"Manual intercept: {intercept}")
print(f"Fitted beta (wiothout intercept): {beta}")
print(f"Sklearn intercept: {skl.intercept_}")
print(f"Sklearn fitted beta (without intercept): {skl.coef_}")
ypredictOwn = X @ beta
ypredictSKL = skl.predict(X)
print(f"MSE with Manual intercept")
print(MSE(y,ypredictOwn+intercept))
print(f"MSE with Sklearn intercept")
print(MSE(y,ypredictSKL))
plt.plot(x, X @ beta + intercept, "--", label="Fit (manual intercept)")
plt.plot(x, skl.predict(X), "--", label="Sklearn (fit_intercept=True)")
plt.grid()
plt.legend()
plt.show()
# The intercept is the value of our output/target variable
# when all our features are zero and our function crosses the $y$-axis (for a one-dimensional case).
#
# Printing the MSE, we see first that both methods give the same MSE, as
# they should. However, when we move to for example Ridge regression,
# the way we treat the intercept may give a larger or smaller MSE,
# meaning that the MSE can be penalized by the value of the
# intercept. Not including the intercept in the fit, means that the
# regularization term does not include $\beta_0$. For different values
# of $\lambda$, this may lead to differing MSE values.
#
# To remind the reader, the regularization term, with the intercept in Ridge regression, is given by
# $$
# \lambda \vert\vert \boldsymbol{\beta} \vert\vert_2^2 = \lambda \sum_{j=0}^{p-1}\beta_j^2,
# $$
# but when we take out the intercept, this equation becomes
# $$
# \lambda \vert\vert \boldsymbol{\beta} \vert\vert_2^2 = \lambda \sum_{j=1}^{p-1}\beta_j^2.
# $$
# For Lasso regression we have
# $$
# \lambda \vert\vert \boldsymbol{\beta} \vert\vert_1 = \lambda \sum_{j=1}^{p-1}\vert\beta_j\vert.
# $$
# It means that, when scaling the design matrix and the outputs/targets,
# by subtracting the mean values, we have an optimization problem which
# is not penalized by the intercept. The MSE value can then be smaller
# since it focuses only on the remaining quantities. If we however bring
# back the intercept, we will get a MSE which then contains the
# intercept.
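# A small check of this point (our own addition) with **Scikit-Learn**: with
# `fit_intercept=True` the penalty acts only on the coefficients, so for very
# large $\lambda$ the coefficients are driven towards zero while the intercept
# approaches the mean of the targets.
import numpy as np
from sklearn.linear_model import Ridge
rng = np.random.default_rng(9)
x_demo = rng.uniform(size=(100, 1))
y_demo = 2.0 + 5.0*x_demo[:, 0] + 0.1*rng.standard_normal(100)
for lam in (1e-3, 1.0, 1e3, 1e6):
    reg_demo = Ridge(alpha=lam, fit_intercept=True).fit(x_demo, y_demo)
    print(f"lambda = {lam:8.0e}: coef = {reg_demo.coef_[0]: .4f}, "
          f"intercept = {reg_demo.intercept_: .4f}, mean(y) = {y_demo.mean():.4f}")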
#
#
# Armed with this wisdom, we attempt first to simply set the intercept equal to **False** in our implementation of Ridge regression for our well-known vanilla data set.
# In[12]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
def MSE(y_data,y_model):
n = np.size(y_model)
return np.sum((y_data-y_model)**2)/n
# A seed just to ensure that the random numbers are the same for every run.
# Useful for eventual debugging.
np.random.seed(3155)
n = 100
x = np.random.rand(n)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2)
Maxpolydegree = 20
X = np.zeros((n,Maxpolydegree))
# We explicitly include the intercept column
for degree in range(Maxpolydegree):
X[:,degree] = x**degree
# We split the data in test and training data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
p = Maxpolydegree
I = np.eye(p,p)
# Decide which values of lambda to use
nlambdas = 6
MSEOwnRidgePredict = np.zeros(nlambdas)
MSERidgePredict = np.zeros(nlambdas)
lambdas = np.logspace(-4, 2, nlambdas)
for i in range(nlambdas):
lmb = lambdas[i]
OwnRidgeBeta = np.linalg.pinv(X_train.T @ X_train+lmb*I) @ X_train.T @ y_train
# Note: we include the intercept column and no scaling
RegRidge = linear_model.Ridge(lmb,fit_intercept=False)
RegRidge.fit(X_train,y_train)
# and then make the prediction
ytildeOwnRidge = X_train @ OwnRidgeBeta
ypredictOwnRidge = X_test @ OwnRidgeBeta
ytildeRidge = RegRidge.predict(X_train)
ypredictRidge = RegRidge.predict(X_test)
MSEOwnRidgePredict[i] = MSE(y_test,ypredictOwnRidge)
MSERidgePredict[i] = MSE(y_test,ypredictRidge)
print("Beta values for own Ridge implementation")
print(OwnRidgeBeta)
print("Beta values for Scikit-Learn Ridge implementation")
print(RegRidge.coef_)
print("MSE values for own Ridge implementation")
print(MSEOwnRidgePredict[i])
print("MSE values for Scikit-Learn Ridge implementation")
print(MSERidgePredict[i])
# Now plot the results
plt.figure()
plt.plot(np.log10(lambdas), MSEOwnRidgePredict, 'r', label = 'MSE own Ridge Test')
plt.plot(np.log10(lambdas), MSERidgePredict, 'g', label = 'MSE Ridge Test')
plt.xlabel('log10(lambda)')
plt.ylabel('MSE')
plt.legend()
plt.show()
# The results agree very well when we force **Scikit-Learn**'s Ridge function to use the first column of our design matrix as the intercept.
# Here we have thus explicitly included the intercept column in the design matrix.
# What happens if we do not include the intercept in our fit?
# Let us see how we can change this code by zero centering the data.
# In[13]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
def MSE(y_data,y_model):
n = np.size(y_model)
return np.sum((y_data-y_model)**2)/n
# A seed just to ensure that the random numbers are the same for every run.
# Useful for eventual debugging.
np.random.seed(315)
n = 100
x = np.random.rand(n)
y = np.exp(-x**2) + 1.5 * np.exp(-(x-2)**2)
Maxpolydegree = 20
X = np.zeros((n,Maxpolydegree-1))
for degree in range(1,Maxpolydegree): #No intercept column
X[:,degree-1] = x**(degree)
# We split the data in test and training data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#For our own implementation, we will need to deal with the intercept by centering the design matrix and the target variable
X_train_mean = np.mean(X_train,axis=0)
#Center by removing mean from each feature
X_train_scaled = X_train - X_train_mean
X_test_scaled = X_test - X_train_mean
#The model intercept (called y_scaler) is given by the mean of the target variable (IF X is centered)
#Remove the intercept from the training data.
y_scaler = np.mean(y_train)
y_train_scaled = y_train - y_scaler
p = Maxpolydegree-1
I = np.eye(p,p)
# Decide which values of lambda to use
nlambdas = 6
MSEOwnRidgePredict = np.zeros(nlambdas)
MSERidgePredict = np.zeros(nlambdas)
lambdas = np.logspace(-4, 2, nlambdas)
for i in range(nlambdas):
lmb = lambdas[i]
OwnRidgeBeta = np.linalg.pinv(X_train_scaled.T @ X_train_scaled+lmb*I) @ X_train_scaled.T @ (y_train_scaled)
intercept_ = y_scaler - X_train_mean@OwnRidgeBeta #The intercept can be shifted so the model can predict on uncentered data
#Add intercept to prediction
ypredictOwnRidge = X_test_scaled @ OwnRidgeBeta + y_scaler
RegRidge = linear_model.Ridge(lmb)
RegRidge.fit(X_train,y_train)
ypredictRidge = RegRidge.predict(X_test)
MSEOwnRidgePredict[i] = MSE(y_test,ypredictOwnRidge)
MSERidgePredict[i] = MSE(y_test,ypredictRidge)
print("Beta values for own Ridge implementation")
print(OwnRidgeBeta) #Intercept is given by mean of target variable
print("Beta values for Scikit-Learn Ridge implementation")
print(RegRidge.coef_)
print('Intercept from own implementation:')
print(intercept_)
print('Intercept from Scikit-Learn Ridge implementation')
print(RegRidge.intercept_)
print("MSE values for own Ridge implementation")
print(MSEOwnRidgePredict[i])
print("MSE values for Scikit-Learn Ridge implementation")
print(MSERidgePredict[i])
# Now plot the results
plt.figure()
plt.plot(np.log10(lambdas), MSEOwnRidgePredict, 'b--', label = 'MSE own Ridge Test')
plt.plot(np.log10(lambdas), MSERidgePredict, 'g--', label = 'MSE SL Ridge Test')
plt.xlabel('log10(lambda)')
plt.ylabel('MSE')
plt.legend()
plt.show()
# We see here, when compared to the code which explicitly includes the
# intercept column, that our MSE value is actually smaller. This is
# because the regularization term does not include the intercept value
# $\beta_0$ in the fitting. This applies to Lasso regularization as
# well. It means that our optimization is now done only with the
# centered matrix and/or vector that enter the fitting procedure. Note
# also that the problem with the intercept occurs mainly in this type
# of polynomial fitting problem.
#
# In the next example these considerations about the role of the intercept do not come into play.
#
# ## More complicated Example: The Ising model
#
# The one-dimensional Ising model with nearest neighbor interaction, no
# external field and a constant coupling constant $J$ is given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# H = -J \sum_{k}^L s_k s_{k + 1},
# \label{_auto1} \tag{1}
# \end{equation}
# $$
# where $s_i \in \{-1, 1\}$ and $s_{L + 1} = s_1$. The number of spins
# in the system is determined by $L$. For the one-dimensional system
# there is no phase transition.
#
# We will look at a system of $L = 40$ spins with a coupling constant of
# $J = 1$. To get enough training data we will generate 10000 states
# with their respective energies.
# In[14]:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
import scipy.linalg as scl
from sklearn.model_selection import train_test_split
import tqdm
sns.set(color_codes=True)
cmap_args=dict(vmin=-1., vmax=1., cmap='seismic')
L = 40
n = int(1e4)
spins = np.random.choice([-1, 1], size=(n, L))
J = 1.0
energies = np.zeros(n)
for i in range(n):
energies[i] = - J * np.dot(spins[i], np.roll(spins[i], 1))
# Here we use ordinary least squares
# regression to predict the energy for the nearest neighbor
# one-dimensional Ising model on a ring, i.e., the endpoints wrap
# around. We will use linear regression to fit a value for
# the coupling constant to achieve this.
#
# A more general form for the one-dimensional Ising model is
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# H = - \sum_j^L \sum_k^L s_j s_k J_{jk}.
# \label{_auto2} \tag{2}
# \end{equation}
# $$
# Here we allow for interactions beyond the nearest neighbors and a state dependent
# coupling constant. This latter expression can be formulated as
# a matrix-product
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# \boldsymbol{H} = \boldsymbol{X} J,
# \label{_auto3} \tag{3}
# \end{equation}
# $$
# where $X_{jk} = s_j s_k$ and $J$ is a matrix which consists of the
# elements $-J_{jk}$. This form of writing the energy fits perfectly
# with the form utilized in linear regression, that is
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# \boldsymbol{y} = \boldsymbol{X}\boldsymbol{\beta} + \boldsymbol{\epsilon},
# \label{_auto4} \tag{4}
# \end{equation}
# $$
# We split the data in training and test data as discussed in the previous example
# In[15]:
X = np.zeros((n, L ** 2))
for i in range(n):
X[i] = np.outer(spins[i], spins[i]).ravel()
y = energies
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# In the ordinary least squares method we choose the cost function
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# C(\boldsymbol{X}, \boldsymbol{\beta})= \frac{1}{n}\left\{(\boldsymbol{X}\boldsymbol{\beta} - \boldsymbol{y})^T(\boldsymbol{X}\boldsymbol{\beta} - \boldsymbol{y})\right\}.
# \label{_auto5} \tag{5}
# \end{equation}
# $$
# We then find the extremal point of $C$ by taking the derivative with respect to $\boldsymbol{\beta}$ as discussed above.
# This yields the expression for $\boldsymbol{\beta}$ to be
# $$
# \boldsymbol{\beta} = \left(\boldsymbol{X}^T \boldsymbol{X}\right)^{-1}\boldsymbol{X}^T \boldsymbol{y},
# $$
# which immediately imposes some requirements on $\boldsymbol{X}$ as there must exist
# an inverse of $\boldsymbol{X}^T \boldsymbol{X}$. If the expression we are modeling contains an
# intercept, i.e., a constant term, we must make sure that the
# first column of $\boldsymbol{X}$ consists of $1$. We do this here
# In[16]:
X_train_own = np.concatenate(
(np.ones(len(X_train))[:, np.newaxis], X_train),
axis=1
)
X_test_own = np.concatenate(
(np.ones(len(X_test))[:, np.newaxis], X_test),
axis=1
)
# Doing the inversion directly turns out to be a bad idea since the matrix
# $\boldsymbol{X}^T\boldsymbol{X}$ is singular. An alternative approach is to use the **singular
# value decomposition**. Using the definition of the Moore-Penrose
# pseudoinverse we can write the equation for $\boldsymbol{\beta}$ as
# $$
# \boldsymbol{\beta} = \boldsymbol{X}^{+}\boldsymbol{y},
# $$
# where the pseudoinverse of $\boldsymbol{X}$ is given by
# $$
# \boldsymbol{X}^{+} = \left(\boldsymbol{X}^T\boldsymbol{X}\right)^{-1}\boldsymbol{X}^T.
# $$
# Using the singular value decomposition we can decompose the matrix as $\boldsymbol{X} = \boldsymbol{U}\boldsymbol{\Sigma} \boldsymbol{V}^T$,
# where $\boldsymbol{U}$ and $\boldsymbol{V}$ are orthogonal (unitary) matrices and $\boldsymbol{\Sigma}$ contains the singular values (more details below).
# The pseudoinverse then reads $\boldsymbol{X}^{+} = \boldsymbol{V}\boldsymbol{\Sigma}^{+} \boldsymbol{U}^T$, which reduces the equation for
# $\boldsymbol{\beta}$ to
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# \boldsymbol{\beta} = \boldsymbol{V}\boldsymbol{\Sigma}^{+} \boldsymbol{U}^T \boldsymbol{y}.
# \label{_auto6} \tag{6}
# \end{equation}
# $$
# Note that solving this equation by actually doing the pseudoinverse
# (which is what we will do) is not a good idea as this operation scales
# as $\mathcal{O}(n^3)$, where $n$ is the number of elements in a
# general matrix. Instead, doing $QR$-factorization and solving the
# linear system as an equation would reduce this down to
# $\mathcal{O}(n^2)$ operations.
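# The QR-based alternative mentioned above can be sketched as follows. It
# assumes a design matrix with full column rank, which is not the case for
# the Ising design matrix used here (where we therefore fall back on the
# SVD/pseudoinverse below); the function name ols_qr is just a placeholder.
# In[ ]:
import numpy as np
import scipy.linalg as scl
def ols_qr(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    # Thin QR factorization: x = Q R with Q orthonormal and R upper triangular.
    q, r = np.linalg.qr(x)
    # Solve R beta = Q^T y by back substitution instead of forming an inverse.
    return scl.solve_triangular(r, q.T @ y, lower=False)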
# In[17]:
def ols_svd(x: np.ndarray, y: np.ndarray) -> np.ndarray:
u, s, v = scl.svd(x)
return v.T @ scl.pinv(scl.diagsvd(s, u.shape[0], v.shape[0])) @ u.T @ y
# In[18]:
beta = ols_svd(X_train_own,y_train)
# When extracting the $J$-matrix we need to make sure that we remove the intercept, as is done here
# In[19]:
J = beta[1:].reshape(L, L)
# A way of looking at the coefficients in $J$ is to plot the matrices as images.
# In[20]:
fig = plt.figure(figsize=(20, 14))
im = plt.imshow(J, **cmap_args)
plt.title("OLS", fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
cb = fig.colorbar(im)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=18)
plt.show()
# It is interesting to note that OLS
# considers both $J_{j, j + 1} = -0.5$ and $J_{j, j - 1} = -0.5$ as
# valid matrix elements for $J$.
# In our discussion below on hyperparameters and Ridge and Lasso regression we will see that
# this degeneracy can be removed, although only partly, and only with Lasso regression.
#
# In this case our matrix inversion was actually possible. The obvious question now is what is the mathematics behind the SVD?
#
#
#
#
#
# Let us now
# focus on Ridge and Lasso regression as well. We repeat some of the
# basic parts of the Ising model and the setup of the training and test
# data. The one-dimensional Ising model with nearest neighbor
# interaction, no external field and a constant coupling constant $J$ is
# given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto7"></div>
#
# $$
# \begin{equation}
# H = -J \sum_{k}^L s_k s_{k + 1},
# \label{_auto7} \tag{7}
# \end{equation}
# $$
# where $s_i \in \{-1, 1\}$ and $s_{L + 1} = s_1$. The number of spins in the system is determined by $L$. For the one-dimensional system there is no phase transition.
#
# We will look at a system of $L = 40$ spins with a coupling constant of $J = 1$. To get enough training data we will generate 10000 states with their respective energies.
# In[21]:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
import scipy.linalg as scl
from sklearn.model_selection import train_test_split
import sklearn.linear_model as skl
import tqdm
sns.set(color_codes=True)
cmap_args=dict(vmin=-1., vmax=1., cmap='seismic')
L = 40
n = int(1e4)
spins = np.random.choice([-1, 1], size=(n, L))
J = 1.0
energies = np.zeros(n)
for i in range(n):
energies[i] = - J * np.dot(spins[i], np.roll(spins[i], 1))
# A more general form for the one-dimensional Ising model is
# <!-- Equation labels as ordinary links -->
# <div id="_auto8"></div>
#
# $$
# \begin{equation}
# H = - \sum_j^L \sum_k^L s_j s_k J_{jk}.
# \label{_auto8} \tag{8}
# \end{equation}
# $$
# Here we allow for interactions beyond the nearest neighbors and a more
# adaptive coupling matrix. This latter expression can be formulated as
# a matrix-product on the form
# <!-- Equation labels as ordinary links -->
# <div id="_auto9"></div>
#
# $$
# \begin{equation}
# H = X J,
# \label{_auto9} \tag{9}
# \end{equation}
# $$
# where $X_{jk} = s_j s_k$ and $J$ is the matrix consisting of the
# elements $-J_{jk}$. This form of writing the energy fits perfectly
# with the form utilized in linear regression, viz.
# <!-- Equation labels as ordinary links -->
# <div id="_auto10"></div>
#
# $$
# \begin{equation}
# \boldsymbol{y} = \boldsymbol{X}\boldsymbol{\beta} + \boldsymbol{\epsilon}.
# \label{_auto10} \tag{10}
# \end{equation}
# $$
# We organize the data as we did above
# In[22]:
X = np.zeros((n, L ** 2))
for i in range(n):
X[i] = np.outer(spins[i], spins[i]).ravel()
y = energies
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.96)
X_train_own = np.concatenate(
(np.ones(len(X_train))[:, np.newaxis], X_train),
axis=1
)
X_test_own = np.concatenate(
(np.ones(len(X_test))[:, np.newaxis], X_test),
axis=1
)
# We will do all fitting with **Scikit-Learn**,
# In[23]:
clf = skl.LinearRegression().fit(X_train, y_train)
# When extracting the $J$-matrix we make sure to remove the intercept
# In[24]:
J_sk = clf.coef_.reshape(L, L)
# And then we plot the results
# In[25]:
fig = plt.figure(figsize=(20, 14))
im = plt.imshow(J_sk, **cmap_args)
plt.title("LinearRegression from Scikit-learn", fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
cb = fig.colorbar(im)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=18)
plt.show()
# The results agree perfectly with our previous discussion where we used our own code.
#
#
# Having explored the ordinary least squares we move on to ridge
# regression. In ridge regression we include a **regularizer**. This
# involves a new cost function which leads to a new estimate for the
# weights $\boldsymbol{\beta}$. This results in a penalized regression problem. The
# cost function is given by
# <!-- Equation labels as ordinary links -->
# <div id="_auto11"></div>
#
# $$
# \begin{equation}
# C(\boldsymbol{X}, \boldsymbol{\beta}; \lambda) = (\boldsymbol{X}\boldsymbol{\beta} - \boldsymbol{y})^T(\boldsymbol{X}\boldsymbol{\beta} - \boldsymbol{y}) + \lambda \boldsymbol{\beta}^T\boldsymbol{\beta}.
# \label{_auto11} \tag{11}
# \end{equation}
# $$
# In[26]:
_lambda = 0.1
clf_ridge = skl.Ridge(alpha=_lambda).fit(X_train, y_train)
J_ridge_sk = clf_ridge.coef_.reshape(L, L)
fig = plt.figure(figsize=(20, 14))
im = plt.imshow(J_ridge_sk, **cmap_args)
plt.title("Ridge from Scikit-learn", fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
cb = fig.colorbar(im)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=18)
plt.show()
# In the **Least Absolute Shrinkage and Selection Operator** (LASSO)-method we get a third cost function.
# <!-- Equation labels as ordinary links -->
# <div id="_auto12"></div>
#
# $$
# \begin{equation}
# C(\boldsymbol{X}, \boldsymbol{\beta}; \lambda) = (\boldsymbol{X}\boldsymbol{\beta} - \boldsymbol{y})^T(\boldsymbol{X}\boldsymbol{\beta} - \boldsymbol{y}) + \lambda \vert\vert \boldsymbol{\beta} \vert\vert_1.
# \label{_auto12} \tag{12}
# \end{equation}
# $$
# Finding the extremal point of this cost function is not so straight-forward as in least squares and ridge. We will therefore rely solely on the function ``Lasso`` from **Scikit-Learn**.
# In[27]:
clf_lasso = skl.Lasso(alpha=_lambda).fit(X_train, y_train)
J_lasso_sk = clf_lasso.coef_.reshape(L, L)
fig = plt.figure(figsize=(20, 14))
im = plt.imshow(J_lasso_sk, **cmap_args)
plt.title("Lasso from Scikit-learn", fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
cb = fig.colorbar(im)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=18)
plt.show()
# It is quite striking how LASSO breaks the symmetry of the coupling
# constant as opposed to ridge and OLS. We get a sparse solution with
# $J_{j, j + 1} = -1$.
#
#
#
#
# We see how the different models perform for a different set of values for $\lambda$.
# In[28]:
lambdas = np.logspace(-4, 5, 10)
train_errors = {
"ols_sk": np.zeros(lambdas.size),
"ridge_sk": np.zeros(lambdas.size),
"lasso_sk": np.zeros(lambdas.size)
}
test_errors = {
"ols_sk": np.zeros(lambdas.size),
"ridge_sk": np.zeros(lambdas.size),
"lasso_sk": np.zeros(lambdas.size)
}
plot_counter = 1
fig = plt.figure(figsize=(32, 54))
for i, _lambda in enumerate(tqdm.tqdm(lambdas)):
for key, method in zip(
["ols_sk", "ridge_sk", "lasso_sk"],
[skl.LinearRegression(), skl.Ridge(alpha=_lambda), skl.Lasso(alpha=_lambda)]
):
method = method.fit(X_train, y_train)
train_errors[key][i] = method.score(X_train, y_train)
test_errors[key][i] = method.score(X_test, y_test)
omega = method.coef_.reshape(L, L)
plt.subplot(10, 5, plot_counter)
plt.imshow(omega, **cmap_args)
plt.title(r"%s, $\lambda = %.4f$" % (key, _lambda))
plot_counter += 1
plt.show()
# We see that LASSO reaches a good solution for low
# values of $\lambda$, but will "wither" when we increase $\lambda$ too
# much. Ridge is more stable over a larger range of values for
# $\lambda$, but eventually also fades away.
#
#
# To determine which value of $\lambda$ is best we plot the accuracy of
# the models when predicting the training and the testing set. We expect
# the accuracy of the training set to be quite good, but if the accuracy
# of the testing set is much lower this tells us that we might be
# subject to an overfit model. The ideal scenario is an accuracy on the
# testing set that is close to the accuracy of the training set.
# In[29]:
fig = plt.figure(figsize=(20, 14))
colors = {
"ols_sk": "r",
"ridge_sk": "y",
"lasso_sk": "c"
}
for key in train_errors:
plt.semilogx(
lambdas,
train_errors[key],
colors[key],
label="Train {0}".format(key),
linewidth=4.0
)
for key in test_errors:
plt.semilogx(
lambdas,
test_errors[key],
colors[key] + "--",
label="Test {0}".format(key),
linewidth=4.0
)
plt.legend(loc="best", fontsize=18)
plt.xlabel(r"$\lambda$", fontsize=18)
plt.ylabel(r"$R^2$", fontsize=18)
plt.tick_params(labelsize=18)
plt.show()
# From the above figure we can see that LASSO with $\lambda = 10^{-2}$
# achieves a very good accuracy on the test set. This by far surpasses the
# other models for all values of $\lambda$.
#
#
#
#
#
#
# ## Exercises and Projects
#
#
#
# The main aim of this project is to study in more detail various
# regression methods, including the Ordinary Least Squares (OLS) method,
# Ridge regression and Lasso regression.
# The total score is **100** points. Each subtask has its own final score.
#
#
# We will first study how to fit polynomials to a specific
# two-dimensional function called [Franke's
# function](http://www.dtic.mil/dtic/tr/fulltext/u2/a081688.pdf). This
# is a function which has been widely used when testing various
# interpolation and fitting algorithms. Furthermore, after having
# established the model and the method, we will employ resampling
# techniques such as cross-validation and/or bootstrap in order to perform a
# proper assessment of our models. We will also study in detail the
# so-called Bias-Variance trade off.
#
#
# The Franke function, which is a weighted sum of four exponentials, reads as follows
# $$
# \begin{align*}
# f(x,y) &= \frac{3}{4}\exp{\left(-\frac{(9x-2)^2}{4} - \frac{(9y-2)^2}{4}\right)}+\frac{3}{4}\exp{\left(-\frac{(9x+1)^2}{49}- \frac{(9y+1)}{10}\right)} \\
# &+\frac{1}{2}\exp{\left(-\frac{(9x-7)^2}{4} - \frac{(9y-3)^2}{4}\right)} -\frac{1}{5}\exp{\left(-(9x-4)^2 - (9y-7)^2\right) }.
# \end{align*}
# $$
# The function will be defined for $x,y\in [0,1]$. Our first step will
# be to perform an OLS regression analysis of this function, trying out
# a polynomial fit with an $x$ and $y$ dependence of the form $[x, y,
# x^2, y^2, xy, \dots]$. We will also include bootstrap first as
# a resampling technique. After that we will include the cross-validation technique. As in homeworks 1 and 2, we can use a uniform
# distribution to set up the arrays of values for $x$ and $y$, or as in
# the example below just a set of fixed
# values for $x$ and $y$ with a given step
# size. We will fit a
# function (for example a polynomial) of $x$ and $y$. Thereafter we
# will repeat much of the same procedure using the Ridge and Lasso
# regression methods, thus introducing a dependence on the penalty
# (hyperparameter) $\lambda$.
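# A design matrix with such mixed terms in $x$ and $y$ can be set up for
# instance as sketched below; the helper name create_X and the column
# ordering are just one possible choice.
# In[ ]:
import numpy as np
def create_X(x, y, degree):
    # Columns: [1, x, y, x^2, xy, y^2, x^3, ...] up to the given total degree.
    x = np.ravel(x)
    y = np.ravel(y)
    N = len(x)
    num_cols = (degree + 1) * (degree + 2) // 2   # number of monomials up to 'degree'
    X = np.ones((N, num_cols))                    # first column is the intercept
    col = 1
    for i in range(1, degree + 1):
        for j in range(i + 1):
            X[:, col] = x**(i - j) * y**j
            col += 1
    return X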
#
# Finally we are going to use (real) digital terrain data and try to
# reproduce these data using the same methods. We will also try to go
# beyond the second-order polynomials mentioned above and explore
# which polynomial fits the data best.
#
#
# The Python code for the Franke function is included here (it performs also a three-dimensional plot of it)
# In[30]:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from random import random, seed
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
x = np.arange(0, 1, 0.05)
y = np.arange(0, 1, 0.05)
x, y = np.meshgrid(x,y)
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
z = FrankeFunction(x, y)
# Plot the surface.
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
ax.set_zlim(-0.10, 1.40)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# ### Exercise: Ordinary Least Square (OLS) on the Franke function
#
# We will generate our own dataset for a function
# $\mathrm{FrankeFunction}(x,y)$ with $x,y \in [0,1]$. The function
# $f(x,y)$ is the Franke function. You should also explore the addition
# of stochastic noise to this function using the normal
# distribution $N(0,1)$.
#
# *Write your own code* (using either a matrix inversion or a singular
# value decomposition from e.g., **numpy** ) or use your code from
# homeworks 1 and 2 and perform a standard least square regression
# analysis using polynomials in $x$ and $y$ up to fifth order. Find the
# [confidence intervals](https://en.wikipedia.org/wiki/Confidence_interval) of the parameters (estimators) $\beta$ by computing their
# variances, evaluate the Mean Squared error (MSE)
# $$
# MSE(\hat{y},\hat{\tilde{y}}) = \frac{1}{n}
# \sum_{i=0}^{n-1}(y_i-\tilde{y}_i)^2,
# $$
# and the $R^2$ score function. If $\tilde{\hat{y}}_i$ is the predicted
# value of the $i$-th sample and $y_i$ is the corresponding true value,
# then the score $R^2$ is defined as
# $$
# R^2(\hat{y}, \tilde{\hat{y}}) = 1 - \frac{\sum_{i=0}^{n - 1} (y_i - \tilde{y}_i)^2}{\sum_{i=0}^{n - 1} (y_i - \bar{y})^2},
# $$
# where we have defined the mean value of $\hat{y}$ as
# $$
# \bar{y} = \frac{1}{n} \sum_{i=0}^{n - 1} y_i.
# $$
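# Both scores can be evaluated directly with **numpy**; the sketch below is
# one minimal way to do it (the function names are of course free to choose).
# In[ ]:
import numpy as np
def MSE(y_data, y_model):
    # Mean squared error as defined above.
    return np.mean((y_data - y_model)**2)
def R2(y_data, y_model):
    # R^2 score: one minus the ratio of residual to total sum of squares.
    return 1.0 - np.sum((y_data - y_model)**2) / np.sum((y_data - np.mean(y_data))**2)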
# Your code has to include a scaling of the data (for example by
# subtracting the mean value), and
# a split of the data in training and test data. For this exercise you can
# either write your own code or use for example the function for
# splitting training data provided by the library **Scikit-Learn** (make
# sure you have installed it). This function is called
# $train\_test\_split$. **You should present a critical discussion of why and how you have scaled or not scaled the data**.
#
# It is normal in essentially all Machine Learning studies to split the
# data in a training set and a test set (possibly also an additional
# validation set). There
# is no explicit recipe for how much data should be included as training
# data and how much as test data. An accepted rule of thumb is to use
# approximately $2/3$ to $4/5$ of the data as training data.
#
#
# You can easily reuse the solutions to your exercises from week 35 and week 36.
#
#
#
# ### Exercise: Bias-variance trade-off and resampling techniques
#
# Our aim here is to study the bias-variance trade-off by implementing the **bootstrap** resampling technique.
#
# With a code which does OLS and includes resampling techniques,
# we will now discuss the bias-variance trade-off in the context of
# continuous predictions such as regression. However, many of the
# intuitions and ideas discussed here also carry over to classification
# tasks and basically all Machine Learning algorithms.
#
# Before you perform an analysis of the bias-variance trade-off on your test data, make
# first a figure similar to Fig. 2.11 of Hastie, Tibshirani, and
# Friedman. Figure 2.11 of this reference displays only the test and training MSEs. The test MSE can be used to
# indicate possible regions of low/high bias and variance. You will most likely not get an
# equally smooth curve!
#
# With this result we move on to the bias-variance trade-off analysis.
#
# Consider a
# dataset $\mathcal{L}$ consisting of the data
# $\mathbf{X}_\mathcal{L}=\{(y_j, \boldsymbol{x}_j), j=0\ldots n-1\}$.
#
# Let us assume that the true data is generated from a noisy model
# $$
# \boldsymbol{y}=f(\boldsymbol{x}) + \boldsymbol{\epsilon}.
# $$
# Here $\epsilon$ is normally distributed with mean zero and variance
# $\sigma^2$.
#
# In our derivation of the ordinary least squares method we defined then
# an approximation to the function $f$ in terms of the parameters
# $\boldsymbol{\beta}$ and the design matrix $\boldsymbol{X}$ which embody our model,
# that is $\boldsymbol{\tilde{y}}=\boldsymbol{X}\boldsymbol{\beta}$.
#
# The parameters $\boldsymbol{\beta}$ are in turn found by optimizing the mean
# squared error via the so-called cost function
# $$
# C(\boldsymbol{X},\boldsymbol{\beta}) =\frac{1}{n}\sum_{i=0}^{n-1}(y_i-\tilde{y}_i)^2=\mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right].
# $$
# Here the expected value $\mathbb{E}$ denotes an average over the data sample.
#
# Show that you can rewrite this as
# $$
# \mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right]=\frac{1}{n}\sum_i(f_i-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right])^2+\frac{1}{n}\sum_i(\tilde{y}_i-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right])^2+\sigma^2.
# $$
# Explain what the terms mean, which one is the bias and which one is
# the variance and discuss their interpretations.
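# One possible route for the derivation is to insert $\boldsymbol{y}=f(\boldsymbol{x})+\boldsymbol{\epsilon}$
# and add and subtract $\mathbb{E}\left[\boldsymbol{\tilde{y}}\right]$,
# $$
# \mathbb{E}\left[(\boldsymbol{y}-\boldsymbol{\tilde{y}})^2\right]=\mathbb{E}\left[(f(\boldsymbol{x})+\boldsymbol{\epsilon}-\mathbb{E}\left[\boldsymbol{\tilde{y}}\right]+\mathbb{E}\left[\boldsymbol{\tilde{y}}\right]-\boldsymbol{\tilde{y}})^2\right],
# $$
# after which the cross terms vanish in expectation since $\mathbb{E}[\boldsymbol{\epsilon}]=0$ and
# $\mathrm{Var}[\boldsymbol{\epsilon}]=\sigma^2$, leaving the (bias)$^2$, variance and noise terms above.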
#
# Perform then a bias-variance analysis of the Franke function by
# studying the MSE value as a function of the complexity of your model.
#
# Discuss the bias-variance trade-off as a function
# of your model complexity (the degree of the polynomial) and the number
# of data points, and possibly also of your training and test data, using the **bootstrap** resampling method.
#
# Note also that when you calculate the bias, in all applications you don't know the function values $f_i$. You would hence replace them with the actual data points $y_i$.
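# A minimal sketch of how these quantities could be estimated with the
# bootstrap is given below. It assumes a generic estimator with fit/predict
# methods (for example LinearRegression from **Scikit-Learn**) and fixed
# training and test arrays; the name bootstrap_bias_variance is a placeholder.
# In[ ]:
import numpy as np
def bootstrap_bias_variance(model, X_train, y_train, X_test, y_test, n_bootstraps=100):
    # Collect test-set predictions from models trained on bootstrap resamples.
    y_pred = np.empty((len(y_test), n_bootstraps))
    n = len(y_train)
    for b in range(n_bootstraps):
        idx = np.random.randint(0, n, n)          # resample training data with replacement
        model.fit(X_train[idx], y_train[idx])
        y_pred[:, b] = model.predict(X_test)
    # Average over bootstrap samples, then over test points (y_i replaces f_i).
    error = np.mean(np.mean((y_test[:, np.newaxis] - y_pred)**2, axis=1))
    bias2 = np.mean((y_test - np.mean(y_pred, axis=1))**2)
    variance = np.mean(np.var(y_pred, axis=1))
    return error, bias2, variance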
#
#
# ### Exercise: Cross-validation as resampling techniques, adding more complexity
#
# The aim here is to write your own code for another widely popular
# resampling technique, the so-called cross-validation method. Again,
# before you start with the cross-validation approach, you should scale your
# data.
#
# Implement the $k$-fold cross-validation algorithm (write your own
# code) and evaluate again the MSE function resulting
# from the test folds. You can compare your own code with that from
# **Scikit-Learn** if needed.
#
# Compare the MSE you get from your cross-validation code with the one
# you got from your **bootstrap** code. Comment your results. Try $5-10$
# folds. You can also compare your own cross-validation code with the
# one provided by **Scikit-Learn**.
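# A minimal sketch of a hand-written $k$-fold cross-validation is given
# below; it assumes an estimator with fit/predict methods, and the function
# name kfold_mse is a placeholder.
# In[ ]:
import numpy as np
def kfold_mse(model, X, y, k=5):
    # Shuffle the indices once and split them into k (nearly) equal folds.
    indices = np.random.permutation(len(y))
    folds = np.array_split(indices, k)
    scores = np.zeros(k)
    for i in range(k):
        test_idx = folds[i]
        train_idx = np.concatenate([folds[j] for j in range(k) if j != i])
        model.fit(X[train_idx], y[train_idx])
        y_pred = model.predict(X[test_idx])
        scores[i] = np.mean((y[test_idx] - y_pred)**2)   # MSE on the held-out fold
    # The cross-validated MSE is the average over the k folds.
    return np.mean(scores)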
#
#
# ### Exercise: Ridge Regression on the Franke function with resampling
#
# Write your own code for the Ridge method, either using matrix
# inversion or the singular value decomposition as done in the previous
# exercise. Perform the same bootstrap analysis as in the
# Exercise 2 (for the same polynomials) and the cross-validation in exercise 3 but now for different values of $\lambda$. Compare and
# analyze your results with those obtained in exercises 1-3. Study the
# dependence on $\lambda$.
#
# Study also the bias-variance trade-off as a function of various values of
# the parameter $\lambda$. For the bias-variance trade-off, use the **bootstrap** resampling method. Comment your results.
#
# ### Exercise: Lasso Regression on the Franke function with resampling
#
# This exercise is essentially a repeat of the previous two ones, but now
# with Lasso regression. Write either your own code (difficult and optional) or, in this case,
# you can also use the functionalities of **Scikit-Learn** (recommended).
# Give a
# critical discussion of the three methods and a judgement of which
# model fits the data best. Perform here as well an analysis of the bias-variance trade-off using the **bootstrap** resampling technique and an analysis of the mean squared error using cross-validation.
#
# ### Exercise: Analysis of real data
#
# With our codes functioning and having been tested properly on a
# simpler function we are now ready to look at real data. We will
# essentially repeat in this exercise what was done in exercises 1-5. However, we
# need first to download the data and prepare properly the inputs to our
# codes. We are going to download digital terrain data from the website
# <https://earthexplorer.usgs.gov/>,
#
# Or, if you prefer, we have placed selected datafiles at <https://github.com/CompPhysics/MachineLearning/tree/master/doc/Projects/2021/Project1/DataFiles>
#
# In order to obtain data for a specific region, you need to register as
# a user (free) at this website and then decide upon which area you want
# to fetch the digital terrain data from. In order to be able to read
# the data properly, you need to specify that the format should be **SRTM
# Arc-Second Global** and download the data as a **GeoTIF** file. The
# files are then stored in *tif* format, which can be imported into a
# Python program using for example the ``imread`` function from **imageio**
# (the older ``scipy.misc.imread`` has been removed from recent SciPy versions)
# In[31]:
from imageio import imread
# Here is a simple part of a Python code which reads and plots the data
# from such files
# In[ ]:
"""
import numpy as np
from imageio import imread
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# Load the terrain
terrain1 = imread('SRTM_data_Norway_1.tif')
# Show the terrain
plt.figure()
plt.title('Terrain over Norway 1')
plt.imshow(terrain1, cmap='gray')
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
"""
# If you should have problems in downloading the digital terrain data,
# we provide two examples under the data folder of project 1. One is
# from a region close to Stavanger in Norway and the other from Møsvatn
# Austfjell, also in Norway.
# Feel free to produce your own terrain data.
#
#
# Alternatively, if you would like to use another data set, feel free to do so. This could be data close to your research area or simply a data set you found interesting. See for example [kaggle.com](https://www.kaggle.com/datasets) for examples.
#
#
# Our final part deals with the parameterization of your digital terrain
# data (or your own data). We will apply all three methods for linear regression, the same type (or higher order) of polynomial
# approximation and cross-validation as resampling technique to evaluate which
# model fits the data best.
#
# At the end, you should present a critical evaluation of your results
# and discuss the applicability of these regression methods to the type
# of data presented here (either the terrain data we propose or other data sets).
|
CompPhysics/MachineLearning
|
doc/LectureNotes/_build/jupyter_execute/chapter3.py
|
Python
|
cc0-1.0
| 93,304
|
[
"Gaussian"
] |
1305f66b01aec555840261a12850bbc69ab507b27b0bb43a7e3b060a4bd94297
|
from __future__ import unicode_literals
from datetime import datetime
import os
from django.utils import html
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.unittest import TestCase
class TestUtilsHtml(TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
('"', '&quot;'),
("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '&lt;&amp;', '&lt;&amp;')
def test_format_html(self):
self.assertEqual(
html.format_html("{0} {1} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
'See: &#39;&eacute; is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
)
for value, output in items:
self.check_output(f, value, output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&#;"),
("ͫ ;", "&#875 ;"),
("abc;", "&#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
def test_clean_html(self):
f = html.clean_html
items = (
('<p>I <i>believe</i> in <b>semantic markup</b>!</p>', '<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'),
('I escape & I don\'t <a href="#" target="_blank">target</a>', 'I escape &amp; I don\'t <a href="#" >target</a>'),
('<p>I kill whitespace</p><br clear="all"><p> </p>', '<p>I kill whitespace</p>'),
# also a regression test for #7267: this used to raise an UnicodeDecodeError
('<p>* foo</p><p>* bar</p>', '<ul>\n<li> foo</li><li> bar</li>\n</ul>'),
)
for value, output in items:
self.check_output(f, value, output)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
|
mdj2/django
|
tests/utils_tests/test_html.py
|
Python
|
bsd-3-clause
| 7,539
|
[
"ADF"
] |
ecb5178b9bd214bc9e00a578209b51c3ea65051b67289ab1732315d801f78e20
|
"""Generate quantities reated to the symmetry of the lattice. This module
draws heavily from Setyawan, Wahyu, and Stefano Curtarolo. "High-throughput
electronic band structure calculations: Challenges and tools." Computational
Materials Science 49.2 (2010): 299-312.
"""
import numpy as np
from numpy.linalg import norm, inv, det
import math, itertools
from copy import deepcopy
import itertools as it
from itertools import islice, product
from phenum.grouptheory import SmithNormalForm
from phenum.vector_utils import _minkowski_reduce_basis
from phenum.symmetry import get_lattice_pointGroup, get_spaceGroup
from BZI.utilities import check_contained, find_point_indices, swap_rows_columns
class Lattice(object):
"""Create a lattice.
Args:
centering_type (str): identifies the position of lattice points in
the conventional unit cell. Options include 'prim', 'base', 'body',
and 'center'.
lattice_constants (list): a list of constants that correspond to the
lengths of the lattice vectors in the conventional unit cell ordered
as [a,b,c].
lattice_angles (list): a list of angles in radians that correspond to
the angles between lattice vectors in the conventional unit cell
ordered as [alpha, beta, gamma] where alpha is the angle between bc,
beta is the angle between ac, and gamma is the angle between ab.
convention (str): gives the convention for finding the reciprocal lattice
vectors. Options include 'ordinary' and 'angular'. The angular convention
includes a factor of 2pi.
Attributes:
centering (str): the type of lattice point centering in the
conventional unit cell. Options include 'prim', 'base', 'body', and
'center'.
constants (list): a list of constants that correspond to the
lengths of the lattice vectors in the conventional unit cell ordered
as [a,b,c].
angles (list): a list of angles in radians that correspond to
the angles between lattice vectors in the conventional unit cell
ordered as [alpha, beta, gamma] where alpha is the angle between bc,
beta is the angle between ac, and gamma is the angle between ab.
vectors (numpy.ndarray): an array of primitive lattice vectors
as columns of a 3x3 matrix.
reciprocal_vectors (numpy.ndarray): the reciprocal primitive
translation vectors as columns of a 3x3 matrix.
symmetry_group (numpy.ndarray): the group of transformations under which
the lattice is invariant.
symmetry_points (dict): a dictionary of high symmetry points with the
keys as letters and values as lattice coordinates.
symmetry_paths (list): a list of symmetry point pairs used when creating
a band structure plot.
type (str): the Bravais lattice type.
volume (float): the volume of the parallelepiped given by the three
lattice vectors
reciprocal_volume (float): the volume of the parallelepiped given by the three
reciprocal lattice vectors
"""
def __init__(self, centering_type, lattice_constants, lattice_angles,
convention="ordinary", rtol=1e-5, atol=1e-8, eps=1e-10):
self.rtol = rtol
self.atol = atol
self.eps = eps
self.centering = centering_type
self.constants = lattice_constants
self.angles = lattice_angles
self.type = find_lattice_type(centering_type, lattice_constants,
lattice_angles)
self.vectors = make_lattice_vectors(self.type, lattice_constants,
lattice_angles)
self.reciprocal_vectors = make_rptvecs(self.vectors, convention)
self.symmetry_group = get_point_group(self.vectors, rtol=self.rtol,
atol=self.atol, eps=self.eps)
# self.symmetry_group = find_point_group(self.vectors)
self.symmetry_points = get_sympts(centering_type, lattice_constants,
lattice_angles, convention=convention)
self.symmetry_paths = get_sympaths(centering_type, lattice_constants,
lattice_angles, convention=convention)
self.volume = det(self.vectors)
self.reciprocal_volume = det(self.reciprocal_vectors)
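# Example usage (a sketch; the constants and angles below are placeholders):
#
#   lat = Lattice("prim", [4.05, 4.05, 4.05], [np.pi/2, np.pi/2, np.pi/2])
#   lat.symmetry_points["$\Gamma$"]   # lattice coordinates of the Gamma point
#   lat.symmetry_paths                # symmetry-point pairs for band-structure plots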
# Define the symmetry points for a simple-cubic lattice in lattice coordinates.
sc_sympts = {"$\Gamma$": [0. ,0., 0.],
"R": [1./2, 1./2, 1./2],
"X": [0., 1./2, 0.],
"M": [1./2, 1./2, 0.]}
# Define the symmetry points for a fcc lattice in lattice coordinates.
# Coordinates are in lattice coordinates.
fcc_sympts = {"$\Gamma$": [0., 0., 0.], # G is the gamma point.
"K": [3./8, 3./8, 3./4],
"L": [1./2, 1./2, 1./2],
"U": [5./8, 1./4, 5./8],
"W": [1./2, 1./4, 3./4],
"X": [1./2, 0., 1./2]}
# One of the band plots needs the gamma point in the neighboring cell.
mod_fcc_sympts = {"$\Gamma$": [0., 0., 0.], # G is the gamma point.
"K": [3./8, 3./8, 3./4],
"L": [1./2, 1./2, 1./2],
"U": [5./8, 1./4, 5./8],
"W": [1./2, 1./4, 3./4],
"X": [1./2, 0., 1./2],
"G2":[1., 1., 1.]}
# Define the symmetry points for a bcc lattice in lattice coordinates
bcc_sympts = {"$\Gamma$": [0., 0., 0.],
"H": [1./2, -1./2, 1./2],
"P": [1./4, 1./4, 1./4],
"N": [0., 0., 1./2]}
# Tetragonal high symmetry points
tet_sympts = {"$\Gamma$": [0., 0., 0.],
"A": [1./2, 1./2, 1./2],
"M": [1./2, 1./2, 0.],
"R": [0., 1./2, 1./2],
"X": [0., 1./2, 0.],
"Z": [0., 0., 1./2]}
def bct1_sympts(a, c):
"""Return the body-centered tetragonal high symmetry points for c < a as a
dictionary.
"""
eta = (1. + c**2/a**2)/4.
return {"$\Gamma$": [0., 0., 0.],
"M": [-1./2, 1./2, 1./2],
"N": [0., 1./2, 0.],
"P": [1./4, 1./4, 1./4],
"X": [0., 0., 1./2],
"Z": [eta, eta, -eta],
"Z1": [-eta, 1-eta, eta]}
def bct2_sympts(a, c):
"""Return the body-centered tetragonal high symmetry points for a < c
as a dictionary.
"""
eta = (1. + a**2/c**2)/4.
zeta = a**2/(2*c**2)
return {"$\Gamma$": [0., 0., 0.],
"N": [0., 1./2, 0.],
"P": [1./4, 1./4, 1./4],
"S": [-eta, eta, eta], # Sigma
"S1": [eta, 1-eta, -eta], # Sigma_1
"X": [0., 0., 1./2],
"Y": [-zeta, zeta, 1./2],
"Y1": [1./2, 1./2, -zeta],
"Z": [1./2, 1./2, -1./2]}
# Orthorhombic high symmetry points
orc_sympts = {"$\Gamma$": [0., 0., 0.],
"R": [1./2, 1./2, 1./2],
"S": [1./2, 1./2, 0.],
"T": [0., 1./2, 1./2],
"U": [1./2, 0., 1./2],
"X": [1./2, 0., 0.],
"Y": [0., 1./2, 0.],
"Z": [0., 0., 1./2]}
def orcf13_sympts(a, b, c):
"""Return the face-centered orthorhombic high symmetry points for
1/a**2 > 1/b**2 +1/c**2 and 1/a**2 = 1/b**2 +1/c**2 as a dictionary.
"""
a = float(a)
b = float(b)
c = float(c)
zeta = (1 + (a/b)**2 - (a/c)**2)/4.
eta = (1 + (a/b)**2 + (a/c)**2)/4.
return {"$\Gamma$": [0., 0., 0.],
"A": [1./2, 1./2+zeta, zeta],
"A1": [1./2, 1./2 - zeta, 1 - zeta],
"L": [1./2, 1./2, 1./2],
"T": [1., 1./2, 1./2],
"X": [0., eta, eta],
"X1": [1., 1-eta, 1-eta],
"Y": [1./2, 0., 1./2],
"Z": [1./2, 1./2, 0.]}
def orcf2_sympts(a, b, c):
"""Return the face-centered orthorhombic high symmetry points for
1/a**2 < 1/b**2 +1/c**2 as a dictionary.
"""
a = float(a)
b = float(b)
c = float(c)
eta = (1 + a**2/b**2 - a**2/c**2)/4
phi = (1 + c**2/b**2 - c**2/a**2)/4
delta = (1 + b**2/a**2 - b**2/c**2)/4
return {"$\Gamma$": [0., 0., 0.],
"C": [1./2, 1./2 - eta, 1. - eta],
"C1": [1./2, 1./2 + eta, eta],
"D": [1./2 - delta, 1./2, 1. - delta],
"D1": [1./2 + delta, 1./2, delta],
"L": [1./2, 1./2, 1./2],
"H": [1 - phi, 1./2 - phi, 1./2],
"H1": [phi, 1./2 + phi, 1./2],
"X": [0., 1./2, 1./2],
"Y": [1./2, 0., 1./2],
"Z": [1./2, 1./2, 0.]}
def orci_sympts(a, b, c):
"""Return the body-centered orthorhombic high symmetry points.
"""
a = float(a)
b = float(b)
c = float(c)
zeta = (1 + a**2/c**2)/4
eta = (1 + b**2/c**2)/4
delta = (b**2 - a**2)/(4*c**2)
mu = (a**2 + b**2)/(4*c**2)
return {"$\Gamma$": [0., 0., 0.],
"L": [-mu, mu, 1./2 - delta],
"L1": [mu, -mu, 1./2 + delta],
"L2": [1./2 - delta, 1./2 + delta, -mu],
"R": [0., 1./2, 0.],
"S": [1./2, 0., 0.],
"T": [0., 0., 1./2],
"W": [1./4, 1./4, 1./4],
"X": [-zeta, zeta, zeta],
"X1": [zeta, 1-zeta, -zeta],
"Y": [eta, -eta, eta],
"Y1": [1-eta, eta, -eta],
"Z": [1./2, 1./2, -1./2]}
def orcc_sympts(a, b):
"""Return the base-centered orthorhombic high symmetry points.
"""
a = float(a)
b = float(b)
zeta = (1 + a**2/b**2)/4
return {"$\Gamma$": [0., 0., 0.],
"A": [zeta, zeta, 1./2],
"A1": [-zeta, 1-zeta, 1./2],
"R": [0., 1./2, 1./2],
"S": [0., 1./2, 0.],
"T": [-1./2, 1./2, 1./2],
"X": [zeta, zeta, 0],
"X1": [-zeta, 1-zeta, 0],
"Y": [-1./2, 1./2, 0.],
"Z": [0., 0., 1./2]}
# High symmetry points for a hexagonal lattice.
hex_sympts = {"$\Gamma$": [0., 0., 0.],
"A": [0., 0., 1./2],
"H": [1./3, 1./3, 1./2],
"K": [1./3, 1./3, 0.],
"L": [1./2, 0., 1./2],
"M": [1./2, 0., 0.]}
def rhl1_sympts(alpha):
"""Return the rhombohedral lattice points for alpha < pi/2 radians.
"""
alpha = float(alpha)
eta = (1 + 4*np.cos(alpha))/(2 + 4*np.cos(alpha))
nu = 3./4 - eta/2
return {"$\Gamma$": [0., 0., 0.],
"B": [eta, 1./2, 1-eta],
"B1": [1./2, 1-eta, eta-1],
"F": [1./2, 1./2, 0.],
"L": [1./2, 0., 0.],
"L1": [0., 0., -1./2],
"P": [eta, nu, nu],
"P1": [1-nu, 1-nu, 1-eta],
"P2": [nu, nu, eta-1],
"Q": [1-nu, nu, 0],
"X": [nu, 0, -nu],
"Z": [1./2, 1./2, 1./2]}
def rhl2_sympts(alpha):
"""Return the rhombohedral lattice points for alpha > pi/2 radians.
"""
alpha = float(alpha)
eta = 1/(2*np.tan(alpha/2)**2)
nu = 3./4 - eta/2
return {"$\Gamma$": [0., 0., 0.],
"F": [1./2, -1./2, 0.],
"L": [1./2, 0., 0.],
"P": [1-nu, -nu, 1-nu],
"P1": [nu, nu-1, nu-1],
"Q": [eta, eta, eta],
"Q1": [1-eta, -eta, -eta],
"Z": [1./2, -1./2, 1./2]}
def mcl_sympts(b, c, alpha):
"""Return the high symmetry points for the monoclinic lattice as a
dictionary where the keys are strings the values are the lattice coordinates
of the high symmetry points.
"""
b = float(b)
c = float(c)
alpha = float(alpha)
eta = (1 - b*np.cos(alpha)/c)/(2*np.sin(alpha)**2)
nu = 1./2 - eta*c*np.cos(alpha)/b
return {"$\Gamma$": [0., 0., 0.],
"A": [1./2, 1./2, 0.],
"C": [0., 1./2, 1./2],
"D": [1./2, 0., 1./2],
"D1": [1./2, 0., -1./2],
"E": [1./2, 1./2, 1./2],
"H": [0., eta, 1-nu],
"H1": [0., 1-eta, nu],
"H2": [0, eta, -nu],
"M": [1./2, eta, 1-nu],
"M1": [1./2, 1-eta, nu],
"M2": [1./2, eta, -nu],
"X": [0., 1./2, 0.],
"Y": [0., 0., 1./2],
"Y1": [0., 0., -1./2],
"Z": [1./2, 0., 0.]}
def mclc12_sympts(a, b, c, alpha):
"""Return the high symmetry points for a base-centered monoclinic lattice
with kgamma > pi/2 or kgamma = pi/2 as a dictionary where the keys are
strings the values are the lattice coordinates of the high symmetry points.
"""
a = float(a)
b = float(b)
c = float(c)
alpha = float(alpha)
zeta = (2 - b*np.cos(alpha)/c)/(4*np.sin(alpha)**2)
eta = 1./2 + 2*zeta*c*np.cos(alpha)/b
psi = 3./4 - a**2/(4*b**2*np.sin(alpha)**2)
phi = psi + (3./4 - psi)*b*np.cos(alpha)/c
return {"$\Gamma$": [0., 0., 0.],
"N": [1./2, 0., 0.],
"N1": [0., -1./2, 0.],
"F": [1-zeta, 1-zeta, 1-eta],
"F1": [zeta, zeta, eta],
"F2": [-zeta, -zeta, 1-eta],
"F3": [1-zeta, -zeta, 1-eta],
"I": [phi, 1-phi, 1./2],
"I1": [1-phi, phi-1, 1./2],
"L": [1./2, 1./2, 1./2],
"M": [1./2, 0., 1./2],
"X": [1-psi, psi-1, 0.],
"X1": [psi, 1-psi, 0.],
"X2": [psi-1, -psi, 0.],
"Y": [1./2, 1./2, 0.],
"Y1": [-1./2, -1./2, 0.],
"Z": [0., 0., 1./2]}
def mclc34_sympts(a, b, c, alpha):
"""Return the high symmetry points for a base-centered monoclinic lattice
with kgamma < pi/2 and b*cos(alpha)/c + (b*sin(alpha)/a)**2 <= 1 (mclc3 is < 1, mclc4 is = 1) as
a dictionary where the keys are strings the values are the lattice
coordinates of the high symmetry points.
"""
a = float(a)
b = float(b)
c = float(c)
alpha = float(alpha)
mu = (1 + b**2/a**2)/4
delta = b*c*np.cos(alpha)/(2*a**2)
zeta = mu - 1./4 + (1 - b*np.cos(alpha)/c)/(4*np.sin(alpha)**2)
eta = 1./2 + 2*zeta*c*np.cos(alpha)/b
phi = 1 + zeta - 2*mu
psi = eta - 2*delta
return {"$\Gamma$": [0., 0., 0.],
"F": [1-phi, 1-phi, 1-psi],
"F1": [phi, phi-1, psi],
"F2": [1-phi, -phi, 1-psi],
"H": [zeta, zeta, eta],
"H1": [1-zeta, -zeta, 1-eta],
"H2": [-zeta, -zeta, 1-eta],
"I": [1./2, -1./2, 1./2],
"M": [1./2, 0., 1./2],
"N": [1./2, 0., 0.],
"N1": [0., -1./2, 0.],
"X": [1./2, -1./2, 0.],
"Y": [mu, mu, delta],
"Y1": [1-mu, -mu, -delta],
"Y2": [-mu, -mu, -delta],
"Y3": [mu, mu-1, delta],
"Z": [0., 0., 1./2]}
def mclc5_sympts(a, b, c, alpha):
"""Return the high symmetry points for a base-centered monoclinic lattice
with kgamma < pi/2 and b*cos(alpha)/c + (b*sin(alpha)/a)**2 > 1 as
a dictionary where the keys are strings the values are the lattice
coordinates of the high symmetry points.
"""
a = float(a)
b = float(b)
c = float(c)
alpha = float(alpha)
zeta = (b**2/a**2 + (1 - b*np.cos(alpha)/c)/np.sin(alpha)**2)/4
eta = 1./2 + 2*zeta*c*np.cos(alpha)/b
mu = eta/2 + b**2/(4*a**2) - b*c*np.cos(alpha)/(2*a**2)
nu = 2*mu - zeta
omega = (4*nu - 1 - b**2*np.sin(alpha)**2/a**2)*c/(2*b*np.cos(alpha))
delta = zeta*c*np.cos(alpha)/b + omega/2 - 1./4
rho = 1 - zeta*a**2/b**2
return {"$\Gamma$": [0., 0., 0.],
"F": [nu, nu, omega],
"F1": [1-nu, 1-nu, 1-omega],
"F2": [nu, nu-1, omega],
"H": [zeta, zeta, eta],
"H1": [1-zeta, -zeta, 1-eta],
"H2": [-zeta, -zeta, 1-eta],
"I": [rho, 1-rho, 1./2],
"I1": [1-rho, rho-1, 1./2],
"L": [1./2, 1./2, 1./2],
"M": [1./2, 0., 1./2],
"N": [1./2, 0., 0.],
"N1": [0., -1./2, 0.],
"X": [1./2, -1./2, 0.],
"Y": [mu, mu, delta],
"Y1": [1-mu, -mu, -delta],
"Y2": [-mu, -mu, -delta],
"Y3": [mu, mu-1, delta],
"Z": [0., 0., 1./2]}
# Triclinic symmetry points with lattice parameters that satisfy
## tri1a ##
# k_alpha > pi/2
# k_beta > pi/2
# k_gamma > pi/2 where k_gamma = min(k_alpha, k_beta, k_gamma)
## tri2a ##
# k_alpha > pi/2
# k_beta > pi/2
# k_gamma = pi/2
tri1a2a_sympts = {"$\Gamma$": [0., 0., 0.],
"L": [1./2, 1./2, 0.],
"M": [0., 1./2, 1./2],
"N": [1./2, 0., 1./2],
"R": [1./2, 1./2, 1./2],
"X": [1./2, 0., 0.],
"Y": [0., 1./2, 0.],
"Z": [0., 0., 1./2]}
# Triclinic symmetry points with lattice parameters that satisfy
## tri1b ##
# k_alpha < pi/2
# k_beta < pi/2
# k_gamma < pi/2 where k_gamma = max(k_alpha, k_beta, k_gamma)
## tri2b ##
# k_alpha < pi/2
# k_beta < pi/2
# k_gamma = pi/2
tr1b2b_sympts = {"$\Gamma$": [0., 0., 0.],
"L": [1./2, -1./2, 0.],
"M": [0., 0., 1./2],
"N": [-1./2, -1./2, 1./2],
"R": [0., -1./2, 1./2],
"X": [0., -1./2, 0.],
"Y": [1./2, 0., 0.],
"Z": [-1./2, 0., 1./2]}
def get_sympts(centering_type, lattice_constants, lattice_angles,
convention="ordinary"):
"""Find the symmetry points for the provided lattice.
Args:
centering_type (str): the centering type for the lattice. Valid
options include 'prim', 'base', 'body', and 'face'.
lattice_constants (list): a list of lattice constants [a, b, c].
lattice_angles (list): a list of lattice angles [alpha, beta, gamma].
convention (str): indicates the convention used in defining the reciprocal
lattice vectors. Options include 'ordinary' and 'angular'.
Returns:
(dict): a dictionary with a string of letters as the keys and lattice
coordinates of the symmetry points as values.
Example:
>>> lattice_constants = [4.05]*3
>>> lattice_angles = [numpy.pi/2]*3
>>> symmetry_points = get_sympts(lattice_constants, lattice_angles)
"""
a = float(lattice_constants[0])
b = float(lattice_constants[1])
c = float(lattice_constants[2])
alpha = float(lattice_angles[0])
beta = float(lattice_angles[1])
gamma = float(lattice_angles[2])
lattice_vectors = make_ptvecs(centering_type, lattice_constants,
lattice_angles)
reciprocal_lattice_vectors = make_rptvecs(lattice_vectors, convention=convention)
rlat_veca = reciprocal_lattice_vectors[:,0] # individual reciprocal lattice vectors
rlat_vecb = reciprocal_lattice_vectors[:,1]
rlat_vecc = reciprocal_lattice_vectors[:,2]
ka = norm(rlat_veca) # lengths of primitive reciprocal lattice vectors
kb = norm(rlat_vecb)
kc = norm(rlat_vecc)
# These are the angles between reciprocal lattice vectors.
kalpha = np.arccos(np.dot(rlat_vecb, rlat_vecc)/(kb*kc))
kbeta = np.arccos(np.dot(rlat_veca, rlat_vecc)/(ka*kc))
kgamma = np.arccos(np.dot(rlat_veca, rlat_vecb)/(ka*kb))
# Start with the cubic lattices, which have all angles equal to pi/2 radians.
if (np.isclose(alpha, np.pi/2) and
np.isclose(beta, np.pi/2) and
np.isclose(gamma, np.pi/2)):
if (np.isclose(a, b) and
np.isclose(b, c)):
if centering_type == "prim":
return sc_sympts
elif centering_type == "body":
return bcc_sympts
elif centering_type == "face":
return fcc_sympts
else:
msg = ("Valid lattice centerings for cubic latices include "
"'prim', 'body', and 'face'.")
raise ValueError(msg.format(centering_type))
# Tetragonal.
elif (np.isclose(a,b) and not np.isclose(b,c)):
if centering_type == "prim":
return tet_sympts
elif centering_type == "body":
if c < a:
return bct1_sympts(a, c)
else:
return bct2_sympts(a, c)
else:
msg = ("Valid lattice centerings for tetragonal lattices "
"include 'prim' and 'body'.")
raise ValueError(msg.format(centering_type))
# Last of the lattices with all angles equal to pi/2 is orthorhombic.
else:
if centering_type == "prim":
return orc_sympts
elif centering_type == "base":
return orcc_sympts(a, b)
elif centering_type == "body":
return orci_sympts(a, b, c)
elif centering_type == "face":
if (1/a**2 >= 1/b**2 +1/c**2):
return orcf13_sympts(a, b, c)
else:
return orcf2_sympts(a, b, c)
else:
msg = ("Valid lattice centerings for orthorhombic lattices "
"include 'prim', 'base', 'body', and 'face'.")
raise ValueError(msg.format(centering_type))
# Hexagonal has alpha = beta = pi/2, gamma = 2pi/3, a = b != c.
if (np.isclose(alpha, beta) and np.isclose(beta, np.pi/2) and
np.isclose(gamma, 2*np.pi/3) and np.isclose(a, b) and not
np.isclose(b, c)):
return hex_sympts
# Rhombohedral has equal angles and constants.
elif (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
np.isclose(a, b) and np.isclose(b, c)):
if alpha < np.pi/2:
return rhl1_sympts(alpha)
else:
return rhl2_sympts(alpha)
# Monoclinic a,b <= c, alpha < pi/2, beta = gamma = pi/2, a != b != c
elif (not (a > c or b > c) and np.isclose(beta, gamma) and
np.isclose(beta, np.pi/2) and alpha < np.pi/2):
if centering_type == "prim":
return mcl_sympts(b, c, alpha)
elif centering_type == "base":
if kgamma > np.pi/2 or np.isclose(kgamma, np.pi/2):
return mclc12_sympts(a, b, c, alpha)
elif (kgamma < np.pi/2
and ((b*np.cos(alpha)/c + (b*np.sin(alpha)/a)**2) < 1.
or np.isclose(b*np.cos(alpha)/c + (b*np.sin(alpha)/a)**2, 1))):
return mclc34_sympts(a, b, c, alpha)
elif (kgamma < np.pi/2 and
(b*np.cos(alpha)/c + (b*np.sin(alpha)/a)**2) > 1.):
return mclc5_sympts(a, b, c, alpha)
else:
msg = "Something is wrong with the monoclinic lattice provided."
raise ValueError(msg.format(reciprocal_lattice_vectors))
else:
msg = ("Valid lattice centerings for monoclinic lattices "
"include 'prim' and 'base'")
raise ValueError(msg.format(centering_type))
# Triclinic a != b != c, alpha != beta != gamma
elif not (np.isclose(a,b) and np.isclose(b,c) and np.isclose(alpha,beta) and
np.isclose(beta, gamma)):
if ((kalpha > np.pi/2 and kbeta > np.pi/2 and kgamma > np.pi/2) or
(kalpha > np.pi/2 and kbeta > np.pi/2 and np.isclose(kgamma, np.pi/2))):
return tri1a2a_sympts
elif ((kalpha < np.pi/2 and kbeta < np.pi/2 and kgamma < np.pi/2) or
(kalpha < np.pi/2 and kbeta < np.pi/2 and np.isclose(kgamma, np.pi/2))):
        return tri1b2b_sympts
else:
msg = "Something is wrong with the triclinic lattice provided."
raise ValueError(msg.format(reciprocal_lattice_vectors))
else:
msg = ("The lattice parameters provided don't correspond to a valid "
"3D Bravais lattice.")
raise ValueError(msg.format())
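# Hedged usage sketch for get_sympts (kept as comments so importing this module has
# no side effects; it assumes numpy is imported as np at module level, as the code
# above already relies on):
#     sc_points = get_sympts("prim", [1.0, 1.0, 1.0], [np.pi/2]*3)    # simple cubic
#     bct_points = get_sympts("body", [2.0, 2.0, 3.0], [np.pi/2]*3)   # body-centered tetragonal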
def get_sympaths(centering_type, lattice_constants, lattice_angles,
convention="ordinary"):
"""Find the symmetry paths for the provided lattice.
Args:
        centering_type (str): the centering type for the lattice. Valid
options include 'prim', 'base', 'body', and 'face'.
lattice_constants (list): a list of lattice constants [a, b, c].
lattice_angles (list): a list of lattice angles [alpha, beta, gamma].
convention (str): indicates the convention used in defining the reciprocal
lattice vectors. Options include 'ordinary' and 'angular'.
Returns:
(dict): a dictionary with a string of letters as the keys and lattice
coordinates of the symmetry points as values.
Example:
        >>> centering_type = "prim"
        >>> lattice_constants = [4.05]*3
        >>> lattice_angles = [numpy.pi/2]*3
        >>> symmetry_paths = get_sympaths(centering_type, lattice_constants,
                                          lattice_angles)
"""
a = float(lattice_constants[0])
b = float(lattice_constants[1])
c = float(lattice_constants[2])
alpha = float(lattice_angles[0])
beta = float(lattice_angles[1])
gamma = float(lattice_angles[2])
lattice_vectors = make_ptvecs(centering_type, lattice_constants,
lattice_angles)
reciprocal_lattice_vectors = make_rptvecs(lattice_vectors, convention=convention)
rlat_veca = reciprocal_lattice_vectors[:,0] # individual reciprocal lattice vectors
rlat_vecb = reciprocal_lattice_vectors[:,1]
rlat_vecc = reciprocal_lattice_vectors[:,2]
ka = norm(rlat_veca) # lengths of primitive reciprocal lattice vectors
kb = norm(rlat_vecb)
kc = norm(rlat_vecc)
# These are the angles between reciprocal lattice vectors.
kalpha = np.arccos(np.dot(rlat_vecb, rlat_vecc)/(kb*kc))
kbeta = np.arccos(np.dot(rlat_veca, rlat_vecc)/(ka*kc))
kgamma = np.arccos(np.dot(rlat_veca, rlat_vecb)/(ka*kb))
# Start with the cubic lattices, which have all angles equal to pi/2 radians.
if (np.isclose(alpha, np.pi/2) and
np.isclose(beta, np.pi/2) and
np.isclose(gamma, np.pi/2)):
if (np.isclose(a, b) and
np.isclose(b, c)):
if centering_type == "prim":
return [["$\Gamma$", "X"], ["X", "M"], ["M", "$\Gamma$"], ["$\Gamma$", "R"],
["R", "X"], ["M", "R"]]
elif centering_type == "body":
return [["$\Gamma$", "H"], ["H", "N"], ["N", "$\Gamma$"], ["$\Gamma$", "P"],
["P", "H"], ["P", "N"]]
elif centering_type == "face":
return [["$\Gamma$", "X"], ["X", "W"], ["W", "K"], ["K", "$\Gamma$"],
["$\Gamma$", "L"], ["L", "U"], ["U", "W"], ["W", "L"],
["L", "K"], ["U", "X"]]
else:
                msg = ("Valid lattice centerings for cubic lattices include "
"'prim', 'body', and 'face'.")
raise ValueError(msg.format(centering_type))
# Tetragonal.
elif (np.isclose(a,b) and not np.isclose(b,c)):
if centering_type == "prim":
return [["$\Gamma$", "X"], ["X", "M"], ["M", "$\Gamma$"], ["$\Gamma$", "Z"],
["Z", "R"], ["R", "A"], ["A", "Z"], ["X", "R"],
["M", "A"]]
elif centering_type == "body":
if c < a:
return [["$\Gamma$", "X"], ["X", "M"], ["M", "$\Gamma$"], ["$\Gamma$", "Z"],
["Z", "P"], ["P", "N"], ["N", "Z1"], ["Z1", "M"],
["X", "P"]]
else:
return [["$\Gamma$", "X"], ["X", "Y"], ["Y", "S"], ["S", "$\Gamma$"],
["$\Gamma$", "Z"], ["Z", "S1"], ["S1", "N"], ["N", "P"],
["P", "Y1"], ["Y1", "Z"], ["X", "P"]]
else:
msg = ("Valid lattice centerings for tetragonal lattices "
"include 'prim' and 'body'.")
raise ValueError(msg.format(centering_type))
# Last of the lattices with all angles equal to pi/2 is orthorhombic.
else:
if centering_type == "prim": # orc
return [["$\Gamma$", "X"], ["X", "S"], ["S", "Y"], ["Y", "$\Gamma$"],
["$\Gamma$", "Z"], ["Z", "U"], ["U", "R"], ["R", "T"],
["T", "Z"], ["Y", "T"], ["U", "X"], ["S", "R"]]
elif centering_type == "base": # orcc
return [["$\Gamma$", "X"], ["X", "S"], ["S", "R"], ["R", "A"],
["A", "Z"], ["Z", "$\Gamma$"], ["$\Gamma$", "Y"], ["Y", "X1"],
["X1", "A1"], ["A1", "T"], ["T", "Y"], ["Z", "T"]]
elif centering_type == "body": # orci
return [["$\Gamma$", "X"], ["X", "L"], ["L", "T"], ["T", "W"],
["W", "R"], ["R", "X1"], ["X1", "Z"], ["Z", "$\Gamma$"],
["$\Gamma$", "Y"], ["Y", "S"], ["S", "W"], ["L1", "Y"],
["Y1", "Z"]]
elif centering_type == "face":
if (1/a**2 > 1/b**2 +1/c**2): # orcf1
return[["$\Gamma$", "Y"], ["Y", "T"], ["T", "Z"], ["Z", "$\Gamma$"],
["$\Gamma$", "X"], ["X", "A1"], ["A1", "Y"], ["T", "X1"],
["X", "A"], ["A", "Z"], ["L", "$\Gamma$"]]
elif np.isclose(1/a**2, 1/b**2 +1/c**2): # orcf3
return [["$\Gamma$", "Y"], ["Y", "T"], ["T", "Z"], ["Z", "$\Gamma$"],
["$\Gamma$", "X"], ["X", "A1"], ["A1", "Y"], ["X", "A"],
["A", "Z"], ["L", "$\Gamma$"]]
else: #orcf2
return [["$\Gamma$", "Y"], ["Y", "C"], ["C", "D"], ["D", "X"],
["X", "$\Gamma$"], ["$\Gamma$", "Z"], ["Z", "D1"], ["D1", "H"],
["H", "C"], ["C1", "Z"], ["X", "H1"], ["H", "Y"],
["L", "$\Gamma$"]]
else:
msg = ("Valid lattice centerings for orthorhombic lattices "
"include 'prim', 'base', 'body', and 'face'.")
raise ValueError(msg.format(centering_type))
# Hexagonal has alpha = beta = pi/2, gamma = 2pi/3, a = b != c.
if (np.isclose(alpha, beta) and np.isclose(beta, np.pi/2) and
np.isclose(gamma, 2*np.pi/3) and np.isclose(a, b) and not
np.isclose(b, c)):
return [["$\Gamma$", "M"], ["M", "K"], ["K", "$\Gamma$"], ["$\Gamma$", "A"], ["A", "L"],
["L", "H"], ["H", "A"], ["L", "M"], ["K", "H"]]
# Rhombohedral has equal angles and constants.
elif (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
np.isclose(a, b) and np.isclose(b, c)):
if alpha < np.pi/2: # RHL1
return [["$\Gamma$", "L"], ["L", "B1"], ["B", "Z"], ["Z", "$\Gamma$"],
["$\Gamma$", "X"], ["Q", "F"], ["F", "P1"], ["P1", "Z"],
["L", "P"]]
else: #RHL2
return [["$\Gamma$", "P"], ["P", "Z"], ["Z", "Q"], ["Q", "$\Gamma$"],
["$\Gamma$", "F"], ["F", "P1"], ["P1", "Q1"], ["Q1", "L"],
["L", "Z"]]
# Monoclinic a,b <= c, alpha < pi/2, beta = gamma = pi/2, a != b != c
elif (not (a > c or b > c) and np.isclose(beta, gamma) and
np.isclose(beta, np.pi/2) and alpha < np.pi/2):
if centering_type == "prim":
return [["$\Gamma$", "Y"], ["Y", "H"], ["H", "C"], ["C", "E"],
["E", "M1"], ["M1", "A"], ["A", "X"], ["X", "H1"],
["M", "D"], ["D", "Z"], ["Y", "D"]]
        elif centering_type == "base":
if np.isclose(kgamma, np.pi/2): # MCLC2
return [["$\Gamma$", "Y"], ["Y", "F"], ["F", "L"], ["L", "I"],
["I1", "Z"], ["Z", "F1"], ["N", "$\Gamma$"], ["$\Gamma$", "M"]]
            elif kgamma > np.pi/2: # MCLC1
return [["$\Gamma$", "Y"], ["Y", "F"], ["F", "L"], ["L", "I"],
["I1", "Z"], ["Z", "F1"], ["Y", "X1"], ["X", "$\Gamma$"],
["$\Gamma$", "N"], ["M", "$\Gamma$"]]
elif (kgamma < np.pi/2 # MCLC3
and ((b*np.cos(alpha)/c + (b*np.sin(alpha)/a)**2) < 1)):
return [["$\Gamma$", "Y"], ["Y", "F"], ["F", "H"], ["H", "Z"],
["Z", "I"], ["I", "F1"], ["H1", "Y1"], ["Y1", "X"],
["X", "$\Gamma$"], ["$\Gamma$", "N"], ["M", "$\Gamma$"]]
elif (kgamma < np.pi/2 and # MCLC4
np.isclose(b*np.cos(alpha)/c + (b*np.sin(alpha)/a)**2, 1)):
return [["$\Gamma$", "Y"], ["Y", "F"], ["F", "H"], ["H", "Z"],
["Z", "I"], ["H1", "Y1"], ["Y1", "X"], ["X", "$\Gamma$"],
["$\Gamma$", "N"], ["M", "$\Gamma$"]]
elif (kgamma < np.pi/2 and # MCLC5
(b*np.cos(alpha)/c + (b*np.sin(alpha)/a)**2) > 1.):
return [["$\Gamma$", "Y"], ["Y", "F"], ["F", "L"], ["L", "I"],
["I1", "Z"], ["Z", "H"], ["H", "F1"], ["H1", "Y1"],
["Y1", "X"], ["X", "$\Gamma$"], ["$\Gamma$", "N"], ["M", "$\Gamma$"]]
else:
msg = "Something is wrong with the monoclinic lattice provided."
raise ValueError(msg.format(reciprocal_lattice_vectors))
else:
msg = ("Valid lattice centerings for monoclinic lattices "
"include 'prim' and 'base'")
raise ValueError(msg.format(centering_type))
# Triclinic a != b != c, alpha != beta != gamma
elif not (np.isclose(a,b) and np.isclose(b,c) and np.isclose(a,c) and
np.isclose(alpha,beta) and np.isclose(beta, gamma) and
np.isclose(alpha, gamma)):
kangles = np.sort([kalpha, kbeta, kgamma])
if kangles[0] > np.pi/2: # TRI1a
return [["X", "$\Gamma$"], ["$\Gamma$", "Y"], ["L", "$\Gamma$"], ["$\Gamma$", "Z"], ["N", "$\Gamma$"],
["$\Gamma$", "M"], ["R", "$\Gamma$"]]
elif kangles[2] < np.pi/2: #TRI1b
return [["X", "$\Gamma$"], ["$\Gamma$", "Y"], ["L", "$\Gamma$"], ["$\Gamma$", "Z"],
["N", "$\Gamma$"], ["$\Gamma$", "M"], ["R", "$\Gamma$"]]
elif (np.isclose(kangles[0], np.pi/2) and (kangles[1] > np.pi/2) and
(kangles[2] > np.pi/2)): #TRI2a
return [["X", "$\Gamma$"], ["$\Gamma$", "Y"], ["L", "$\Gamma$"], ["$\Gamma$", "Z"], ["N", "$\Gamma$"],
["$\Gamma$", "M"], ["R", "$\Gamma$"]]
elif (np.isclose(kangles[2], np.pi/2) and (kangles[0] < np.pi/2) and
(kangles[1] < np.pi/2)): #TRI2b
return [["X", "$\Gamma$"], ["$\Gamma$", "Y"], ["L", "$\Gamma$"], ["$\Gamma$", "Z"],
["N", "$\Gamma$"], ["$\Gamma$", "M"], ["R", "$\Gamma$"]]
else:
msg = "Something is wrong with the triclinic lattice provided."
raise ValueError(msg.format(reciprocal_lattice_vectors))
else:
msg = ("The lattice parameters provided don't correspond to a valid "
"3D Bravais lattice.")
raise ValueError(msg.format())
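# Hedged usage sketch for get_sympaths (comments only; assumes the module-level
# numpy import as np used throughout this file):
#     fcc_paths = get_sympaths("face", [1.0, 1.0, 1.0], [np.pi/2]*3)
#     # fcc_paths is a list of [start_label, end_label] pairs such as ["$\Gamma$", "X"].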
def make_ptvecs(center_type, lat_consts, lat_angles):
"""Provided the center type, lattice constants and angles of the conventional unit
cell, return the primitive translation vectors.
Args:
center_type (str): identifies the location of the atoms in the cell.
        lat_consts (list or numpy.ndarray): the characteristic spacing of atoms in the
material with 'a' first, 'b' second, and 'c' third in the list. These
are typically ordered such that a < b < c.
        lat_angles (list): a list of angles between the conventional lattice vectors,
in radians, with 'alpha' the first entry, 'beta' the second, and 'gamma' the
third in the list.
Returns:
lattice_vectors (numpy.ndarray): returns the primitive translation vectors as
the columns of a matrix.
Example:
>>> center_type = "prim"
>>> lat_consts = [1.2]*3
>>> angles = [np.pi/2]*3
        >>> vectors = make_ptvecs(center_type, lat_consts, angles)
"""
if type(lat_consts) not in (list, np.ndarray):
raise ValueError("The lattice constants must be in a list or numpy "
"array.")
if type(lat_angles) not in (list, np.ndarray):
raise ValueError("The lattice angles must be in a list or numpy array.")
if (np.sum(np.sort(lat_angles)[:2]) < max(lat_angles) or
np.isclose(np.sum(np.sort(lat_angles)[:2]), max(lat_angles))):
msg = ("The sum of the two smallest lattice angles must be greater than "
"the largest lattice angle for the lattice vectors to be "
"linearly independent.")
raise ValueError(msg.format(lat_angles))
# Extract the angles
alpha = float(lat_angles[0])
beta = float(lat_angles[1])
gamma = float(lat_angles[2])
if (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
np.isclose(beta, 2*np.pi/3)):
msg = ("The lattice vectors are linearly dependent with all angles "
"equal to 2pi/3.")
raise ValueError(msg.format(lat_angles))
# Extract the lattice constants for the conventional lattice.
a = float(lat_consts[0])
b = float(lat_consts[1])
c = float(lat_consts[2])
# avec is chosen to lie along the x-direction.
avec = np.array([a, 0., 0.])
# bvec is chosen to lie in the xy-plane.
bvec = np.array([b*np.cos(gamma), b*np.sin(gamma), 0])
# I had to round the argument of the sqrt function in order to avoid
# numerical errors in cvec.
cvec = np.array([c*np.cos(beta),
c/np.sin(gamma)*(np.cos(alpha) -
np.cos(beta)*np.cos(gamma)),
np.sqrt(np.round(c**2 - (c*np.cos(beta))**2 -
(c/np.sin(gamma)*(np.cos(alpha) -
np.cos(beta)*np.cos(gamma)))**2, 15))])
if center_type == "prim":
# I have to take care that a hexagonal grid is rotated 60 degrees so
# it matches what was obtained in Stefano's paper.
if ((np.isclose(a, b) and not np.isclose(b,c)) and
np.isclose(alpha, beta) and np.isclose(beta, np.pi/2) and
np.isclose(gamma, 2*np.pi/3)):
rotate = [[np.cos(gamma/2), np.sin(gamma/2), 0],
[-np.sin(gamma/2), np.cos(gamma/2), 0],
[0, 0, 1]]
av = np.dot(rotate, avec)
bv = np.dot(rotate, bvec)
cv = np.dot(rotate, cvec)
pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
return pt_vecs
# The rhombohedral lattice vectors also need to be rotated to match
# those of Stefano.
elif (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
not np.isclose(beta, np.pi/2) and np.isclose(a, b) and
np.isclose(b,c)):
# The vectors in Stefano's paper are mine rotated 60 degrees.
rotate = [[np.cos(gamma/2), np.sin(gamma/2), 0],
[-np.sin(gamma/2), np.cos(gamma/2), 0],
[0, 0, 1]]
av = np.dot(rotate, avec)
bv = np.dot(rotate, bvec)
cv = np.dot(rotate, cvec)
pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
return pt_vecs
else:
pt_vecs = np.transpose(np.array([avec, bvec, cvec], dtype=float))
return pt_vecs
elif center_type == "base":
av = .5*(avec - bvec)
bv = .5*(avec + bvec)
cv = cvec
# The vectors defined in Stefano's paper are defined
# differently for base-centered, monoclinic lattices.
if (alpha < np.pi/2 and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2) and a <= c and b <= c
and not (np.isclose(a,b) or np.isclose(b,c) or np.isclose(a,c))):
av = .5*(avec + bvec)
bv = .5*(-avec + bvec)
cv = cvec
pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
return pt_vecs
        elif (not (a > c or b > c) and np.isclose(beta, gamma) and
              np.isclose(beta, np.pi/2) and alpha < np.pi/2):
            av = .5*(avec + bvec)
            bv = .5*(-avec + bvec)
            cv = cvec
            pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
            return pt_vecs
        else:
            # For other base-centered lattices (e.g. base-centered orthorhombic),
            # return the primitive vectors built from the default av, bv, cv above.
            pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
            return pt_vecs
elif center_type == "body":
av = .5*(-avec + bvec + cvec)
bv = .5*(avec - bvec + cvec)
cv = .5*(avec + bvec - cvec)
pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
return pt_vecs
elif center_type == "face":
av = .5*(bvec + cvec)
bv = .5*(avec + cvec)
cv = .5*(avec + bvec)
pt_vecs = np.transpose(np.array([av, bv, cv], dtype=float))
return pt_vecs
else:
msg = "Please provide a valid centering type."
raise ValueError(msg.format(center_type))
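# Hedged usage sketch for make_ptvecs (comments only; assumes numpy as np):
#     bcc_vecs = make_ptvecs("body", [1.0, 1.0, 1.0], [np.pi/2]*3)
#     # bcc_vecs holds the body-centered cubic primitive vectors as columns; the
#     # first column is 0.5*[-1, 1, 1].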
def make_rptvecs(A, convention="ordinary"):
"""Return the reciprocal primitive translation vectors of the provided
vectors.
Args:
A (list or numpy.ndarray): the primitive translation vectors in real space
as the columns of a nested list or numpy array.
convention (str): gives the convention that defines the reciprocal lattice vectors.
This is really the difference between using ordinary frequency and angular
frequency, and whether the transformation between real and reciprocal space is
unitary.
Return:
B (numpy.ndarray): return the primitive translation vectors in
reciprocal space as the columns of a matrix.
"""
if convention == "ordinary":
return np.transpose(np.linalg.inv(A))
elif convention == "angular":
return np.transpose(np.linalg.inv(A))*2*np.pi
else:
msg = "The two allowed conventions are 'ordinary' and 'angular'."
raise ValueError(msg.format(convention))
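# Hedged usage sketch for make_rptvecs (comments only; assumes numpy as np):
#     lat_vecs = make_ptvecs("prim", [1.0, 1.0, 1.0], [np.pi/2]*3)
#     rlat_ord = make_rptvecs(lat_vecs)                        # inv(A).T, the identity here
#     rlat_ang = make_rptvecs(lat_vecs, convention="angular")  # 2*pi times the identity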
def make_lattice_vectors(lattice_type, lattice_constants, lattice_angles):
"""Create the vectors that generate a lattice.
Args:
lattice_type (str): the lattice type.
lattice_constants (list or numpy.ndarray): the axial lengths of the
conventional lattice vectors.
lattice_angles (list or numpy.ndarray): the interaxial angles of the
conventional lattice vectors.
Returns:
lattice_vectors (numpy.ndarray): the vectors that generate the lattice
as columns of an array [a1, a2, a3] where a1, a2, and a3 are column
vectors.
Example:
>>> lattice_type = "face-centered cubic"
>>> lattice_constants = [1]*3
>>> lattice_angles = [numpy.pi/2]*3
>>> lattice_vectors = make_lattice_vectors(lattice_type,
lattice_constants,
lattice_angles)
"""
# Extract parameters.
a = float(lattice_constants[0])
b = float(lattice_constants[1])
c = float(lattice_constants[2])
alpha = float(lattice_angles[0])
beta = float(lattice_angles[1])
gamma = float(lattice_angles[2])
if lattice_type == "simple cubic":
if not ((np.isclose(a, b) and np.isclose(b, c))):
msg = ("The lattice constants should all be the same for a simple-"
"cubic lattice")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a simple-cubic lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [a, 0, 0]
a2 = [0, a, 0]
a3 = [0, 0, a]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "face-centered cubic":
if not ((np.isclose(a, b) and np.isclose(b, c))):
msg = ("The lattice constants should all be the same for a face-"
"centered, cubic lattice.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a face-centered, cubic lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [ 0, a/2, a/2]
a2 = [a/2, 0, a/2]
a3 = [a/2, a/2, 0]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "body-centered cubic":
if not ((np.isclose(a, b) and np.isclose(b, c))):
msg = ("The lattice constants should all be the same for a body-"
"centered, cubic lattice.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a body-centered, cubic lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [-a/2, a/2, a/2]
a2 = [ a/2, -a/2, a/2]
a3 = [ a/2, a/2, -a/2]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "tetragonal":
if not (np.isclose(a, b) and
not np.isclose(b, c)):
            msg = ("For a tetragonal lattice, a = b != c where a, b, and c are "
"the first, second, and third entries in lattice_constants, "
"respectively.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a tetragonal lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [a, 0, 0]
a2 = [0, a, 0]
a3 = [0, 0, c]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "body-centered tetragonal":
if not (np.isclose(a, b) and
not np.isclose(b, c)):
msg = ("For a body-centered, tetragonal lattice, a = b != c where "
"a, b, and c are the first, second, and third entries in "
"lattice_constants, respectively.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a body-centered, tetragonal lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [-a/2, a/2, c/2]
a2 = [ a/2, -a/2, c/2]
a3 = [ a/2, a/2, -c/2]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "orthorhombic":
if (np.isclose(a, b) or np.isclose(b, c) or np.isclose(a, c)):
msg = ("The lattice constants should all be different for an "
"orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2 "
"for an orthorhombic lattice.")
raise ValueError(msg.format(lattice_angles))
if (b < a) or (c < b):
            msg = ("The lattice constants should be in ascending order for an "
"orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
a1 = [a, 0, 0]
a2 = [0, b, 0]
a3 = [0, 0, c]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "face-centered orthorhombic":
if (np.isclose(a, b) or np.isclose(b, c) or np.isclose(a, c)):
msg = ("The lattice constants should all be different for a "
"face-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a face-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_angles))
if not (a < b < c):
            msg = ("The lattice constants should be in ascending order for a "
                   "face-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
a1 = [ 0, b/2, c/2]
a2 = [a/2, 0, c/2]
a3 = [a/2, b/2, 0]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "body-centered orthorhombic":
if (np.isclose(a, b) or np.isclose(b, c) or np.isclose(a, c)):
msg = ("The lattice constants should all be different for a "
"body-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a body-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_angles))
if not (a < b < c):
            msg = ("The lattice constants should be in ascending order for a "
                   "body-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
a1 = [-a/2, b/2, c/2]
a2 = [ a/2, -b/2, c/2]
a3 = [ a/2, b/2, -c/2]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "base-centered orthorhombic":
if (np.isclose(a, b) or np.isclose(b, c) or np.isclose(a, c)):
msg = ("The lattice constants should all be different for a "
"base-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, np.pi/2) and np.isclose(beta, np.pi/2)
and np.isclose(gamma, np.pi/2)):
msg = ("The lattice angles should all be the same and equal to pi/2"
" for a base-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_angles))
if not (a < b < c):
            msg = ("The lattice constants should be in ascending order for a "
                   "base-centered, orthorhombic lattice.")
raise ValueError(msg.format(lattice_constants))
a1 = [a/2, -b/2, 0]
a2 = [a/2, b/2, 0]
a3 = [ 0, 0, c]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "hexagonal":
if not (np.isclose(a, b) and
not np.isclose(b, c)):
msg = ("For a hexagonal lattice, a = b != c where "
"a, b, and c are the first, second, and third entries in "
"lattice_constants, respectively.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, beta) and np.isclose(beta, np.pi/2) and
np.isclose(gamma, 2*np.pi/3)):
msg = ("The first two lattice angles, alpha and beta, should be the "
"same and equal to pi/2 while the third gamma should be "
"2pi/3 radians for a hexagonal lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [a/2, -a*np.sqrt(3)/2, 0]
a2 = [a/2, a*np.sqrt(3)/2, 0]
a3 = [0, 0, c]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "rhombohedral":
if not (np.isclose(a, b) and np.isclose(b,c) and np.isclose(a, c)):
msg = ("For a rhombohedral lattice, a = b = c where "
"a, b, and c are the first, second, and third entries in "
"lattice_constants, respectively.")
raise ValueError(msg.format(lattice_constants))
if not (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
np.isclose(alpha, gamma)):
msg = ("All lattice angles should be the same for a rhombohedral "
"lattice.")
raise ValueError(msg.format(lattice_angles))
if (np.isclose(alpha, np.pi/2) or np.isclose(beta, np.pi/2) or
np.isclose(gamma, np.pi/2)):
msg = ("No lattice angle should be equal to pi/2 radians for a "
"rhombohedral lattice.")
raise ValueError(msg.format(lattice_angles))
if (np.isclose(alpha, np.pi/3) or np.isclose(beta, np.pi/3) or
np.isclose(gamma, np.pi/3)):
msg = ("No lattice angle should be equal to pi/3 radians for a "
"rhombohedral lattice.")
raise ValueError(msg.format(lattice_angles))
if (np.isclose(alpha, np.arccos(-1/3)) or np.isclose(beta, np.arccos(-1/3)) or
np.isclose(gamma, np.arccos(-1/3))):
msg = ("No lattice angle should be equal to arccos(-1/3) radians for a "
"rhombohedral lattice.")
raise ValueError(msg.format(lattice_angles))
if (alpha > 2*np.pi/3):
msg = ("The lattice angle should be less than 2*pi/3 radians for a "
"rhombohedral lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [a*np.cos(alpha/2), -a*np.sin(alpha/2), 0]
a2 = [a*np.cos(alpha/2), a*np.sin(alpha/2), 0]
a3x = a*np.cos(alpha)/abs(np.cos(alpha/2))
a3 = [a3x, 0, np.sqrt(a**2 - a3x**2)]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "monoclinic":
if (a > c or b > c):
msg = ("The first and second lattice constants, a and b, should "
"both be less than or equal to the last lattice constant, c,"
" for a monoclinic lattice.")
raise ValueError(msg.format(lattice_constants))
if (np.isclose(a,b) or np.isclose(b,c) or np.isclose(a,c)):
            msg = ("No two lattice constants should be the same for a monoclinic "
                   "lattice.")
raise ValueError(msg.format(lattice_constants))
if alpha >= np.pi/2:
msg = ("The first lattice angle, alpha, should be less than pi/2 "
"radians for a monoclinic lattice.")
raise ValueError(msg.format(lattice_angles))
if not (np.isclose(beta, np.pi/2) and np.isclose(gamma, np.pi/2)):
msg = ("The second and third lattice angles, beta and gamma, "
"should both be pi/2 radians for a monoclinic lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [a, 0, 0]
a2 = [0, b, 0]
a3 = [0, c*np.cos(alpha), c*np.sin(alpha)]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "base-centered monoclinic":
if (a > c or b > c):
            msg = ("The first and second lattice constants, a and b, should "
                   "both be less than or equal to the last lattice constant, c, "
                   "for a base-centered monoclinic lattice.")
raise ValueError(msg.format(lattice_constants))
if alpha >= np.pi/2:
msg = ("The first lattice angle, alpha, should be less than pi/2 "
"radians for a monoclinic lattice.")
raise ValueError(msg.format(lattice_angles))
if not (np.isclose(beta, np.pi/2) and np.isclose(gamma, np.pi/2)):
msg = ("The second and third lattice angles, beta and gamma, "
"should both be pi/2 radians for a monoclinic lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [ a/2, b/2, 0]
a2 = [-a/2, b/2, 0]
a3 = [0, c*np.cos(alpha), c*np.sin(alpha)]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
elif lattice_type == "triclinic":
if (np.isclose(a, b) or np.isclose(b, c) or np.isclose(a, c)):
msg = ("The lattice constants should all be different for a "
"triclinic lattice.")
raise ValueError(msg.format(lattice_constants))
if (np.isclose(alpha, beta) or np.isclose(beta, gamma) or
np.isclose(alpha, gamma)):
msg = ("The lattice angles should all be different for a "
"triclinic lattice.")
raise ValueError(msg.format(lattice_angles))
a1 = [a, 0, 0]
a2 = [b*np.cos(gamma), b*np.sin(gamma), 0]
a3 = [c*np.cos(beta), c/np.sin(gamma)*(np.cos(alpha) -
np.cos(beta)*np.cos(gamma)),
c/np.sin(gamma)*np.sqrt(np.sin(gamma)**2 - np.cos(alpha)**2 -
np.cos(beta)**2 + 2*np.cos(alpha)*
np.cos(beta)*np.cos(gamma))]
lattice_vectors = np.transpose(np.array([a1, a2, a3], dtype=float))
return lattice_vectors
else:
msg = "Please provide a valid lattice type."
raise ValueError(msg.format(lattice_type))
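# Hedged usage sketch for make_lattice_vectors (comments only; assumes numpy as np):
#     fcc_vecs = make_lattice_vectors("face-centered cubic", [1.0]*3, [np.pi/2]*3)
#     mcl_vecs = make_lattice_vectors("monoclinic", [1.0, 2.0, 3.0],
#                                     [np.pi/3, np.pi/2, np.pi/2])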
def sym_path(lattice, npts, cart=False):
"""Create an array of lattice coordinates along the symmetry paths of
the lattice.
Args:
lattice (:py:obj:`BZI.symmetry.Lattice`): an instance of the Lattice
class.
npts (int): the number of points on each symmetry path.
cart (bool): if true, return the k-points in Cartesian coordinates. The
reciprocal lattice vectors will be used in the conversion.
Return:
(numpy.array): an array of lattice coordinates along the symmetry
paths.
"""
paths = []
for i,sym_pair in enumerate(lattice.symmetry_paths):
sym_pti = lattice.symmetry_points[sym_pair[0]]
sym_ptf = lattice.symmetry_points[sym_pair[1]]
pxi = sym_pti[0]
pxf = sym_ptf[0]
pyi = sym_pti[1]
pyf = sym_ptf[1]
pzi = sym_pti[2]
pzf = sym_ptf[2]
px = np.linspace(pxi,pxf,npts)
py = np.linspace(pyi,pyf,npts)
pz = np.linspace(pzi,pzf,npts)
ipath = [[px[j],py[j],pz[j]] for j in range(len(px))]
if i == 0:
paths += ipath
else:
del ipath[-1]
paths += ipath
if cart:
return [np.dot(lattice.reciprocal_vectors, k) for k in paths]
else:
return paths
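# Hedged usage sketch for sym_path (comments only). It assumes a Lattice instance, as
# described in the docstring, with symmetry_points, symmetry_paths, and
# reciprocal_vectors attributes:
#     kpath_lat = sym_path(lattice, 20)             # lattice coordinates
#     kpath_car = sym_path(lattice, 20, cart=True)  # Cartesian coordinates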
def find_point_group(lat_vecs, eps=1e-9):
"""Return the point group of a lattice.
Args:
lat_vecs (numpy.ndarray or list): the vectors as the columns of a matrix.
    Returns:
pg (list): A list of the operators in the point group.
"""
# _get_lattice_pointGroup has the vectors as rows instead of columns.
lat_vecs = np.transpose(lat_vecs)
return get_lattice_pointGroup(lat_vecs, eps)
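# Hedged usage sketch for find_point_group (comments only; assumes numpy as np and
# that get_lattice_pointGroup is imported elsewhere in this module):
#     cubic_vecs = make_ptvecs("prim", [1.0]*3, [np.pi/2]*3)
#     ops = find_point_group(cubic_vecs)  # expected to contain the 48 cubic operations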
def find_space_group(lattice_vectors, atom_labels, atomic_basis, coords="Cart", eps=1e-6):
"""Get the point group and fractional translations of a crystal using phonon-
enumeration's `get_spaceGroup`.
Args:
lattice_vectors (list or numpy.ndarray): the lattice vectors, in Cartesian
coordinates, as columns of a 3x3 array.
        atom_labels (list): a list of atom labels. Each label should be distinct for each
atomic species. The labels must start at zero and should be in the same order
as atomic basis.
atomic_basis (list or numpy.ndarray): a list of atomic positions in Cartesian
(default) or lattice coordinates.
        coords (str): specifies the coordinate system of the atomic basis, either "Cart" or "lat".
eps (float): finite precision parameter.
Returns:
point_group (list): a list of point group operations.
translations (list): a list of translations
"""
if coords == "Cart":
point_group, translations = get_spaceGroup(np.transpose(lattice_vectors),
atom_labels, atomic_basis,
                                                   lattcoords=False, eps=eps)
else:
point_group, translations = get_spaceGroup(np.transpose(lattice_vectors),
atom_labels, atomic_basis,
                                                   lattcoords=True, eps=eps)
return point_group, translations
def shells(vector, lat_vecs):
"""Find the vectors that are equivalent to another vector by symmetry
Args:
vector (list or numpy.ndarray): a vector in cartesian coordinates.
lat_vecs (numpy.ndarray or list): a matrix with the lattice vectors as columns.
Returns:
unique_shells (list): a list of vectors expressed as numpy arrays.
"""
pointgroup = find_point_group(lat_vecs)
all_shells = (np.dot(pointgroup, vector)).tolist()
unique_shells = []
for sh in all_shells:
if any([np.allclose(sh, us) for us in unique_shells]) == True:
continue
else:
unique_shells.append(np.array(sh))
tol = 1.e-10
for (i,us) in enumerate(unique_shells):
for (j,elem) in enumerate(us):
if np.abs(elem) < tol:
unique_shells[i][j] = 0.
return unique_shells
def shells_list(vectors, lat_vecs):
"""Returns a list of several shells useful for constructing pseudopotentials.
Args:
        vectors (list or numpy.ndarray): a list of vectors in Cartesian coordinates.
        lat_vecs (numpy.ndarray or list): a matrix with the lattice vectors as columns.
Returns:
unique_shells (list): a list of vectors expressed as numpy arrays.
Example:
        >>> from BZI.symmetry import shells_list
        >>> lat_vecs = make_ptvecs("prim", [1.]*3, [np.pi/2]*3)
        >>> vectors = [[0.,0.,0.], [1.,0.,0.]]
        >>> shells_list(vectors, lat_vecs)
"""
nested_shells = [shells(i, lat_vecs) for i in vectors]
return np.array(list(itertools.chain(*nested_shells)))
def get_orbits(grid_car, lat_vecs, rlat_vecs, atom_labels, atom_positions,
kpt_coords = "Cart", atom_coords="lat", duplicates=False, pointgroup=None,
complete_orbit=False, unit_cell=True, pg_coords="lat", eps=1e-10, rtol=1e-4,
atol=1e-6):
"""Find the partial orbitals of the points in a grid, including only the
points that are in the grid. This symmetry reduction routine doesn't scale
linearly. It is highly recommended that you use find_orbits instead.
Args:
grid_car (numpy.ndarray): a list of grid point positions in Cartesian
coordinates.
lat_vecs (numpy.ndarray): the lattice vectors as the columns of a 3x3 array
in Cartesian coordinates.
rlat_vecs (numpy.ndarray): the reciprocal lattice vectors as the columns of a 3x3
array in Cartesian coordinates.
        atom_labels (list): a list of atom labels. Each label should be distinct for each
atomic species. The labels must start at zero and should be in the same order
as atomic basis.
atom_positions (list or numpy.ndarray): a list of atom positions in Cartesian
(default) or lattice coordinates.
kpt_coords (str): a string that indicates coordinate system of the returned
k-points. It can be in Cartesian ("cart") or lattice ("lat").
atom_coords (str): a string that indicates coordinate system of the atom positions
It can be in Cartesian ("cart") or lattice ("lat").
duplicates (bool): if there are points in the grid outside the first
unit cell, duplicates should be true.
pointgroup (list): a list of point group symmetry operators in lattice
coordinates.
complete_orbit (bool): if true, the complete orbit of each k-point is returned.
unit_cell (bool): if true, return the points of the orbits in the first unit cell.
Has no effect unless complete_orbit = True.
pg_coords (string): the coordinates of the point group: "lat" stands for lattice
and "Cart" for Cartesian.
eps (float): finite precision parameter used when finding points within a sphere.
rtol (float): a relative tolerance used when finding if two k-points are
equivalent.
atol (float): an absolute tolerance used when finding if two k-points are
equivalent.
Returns:
        gp_orbits (list): the orbits of the grid points in a nested list.
orbit_wts (list): the number of k-points in each orbit.
"""
if type(grid_car) == list:
if type(grid_car[0]) == list:
grid_car = np.array(grid_car)
else:
grid_car = np.array([g.tolist() for g in grid_car])
else:
if type(grid_car[0]) == list:
grid_car = np.array([g.tolist() for g in grid_car])
else:
pass
# Put the grid in lattice coordinates and move it into the first unit cell.
grid_lat = bring_into_cell(grid_car, rlat_vecs, atol=atol, rtol=rtol, coords="lat")
# Remove duplicates if necessary.
if duplicates:
grid_copy = list(deepcopy(grid_lat))
grid_lat = []
while len(grid_copy) != 0:
gp = grid_copy.pop()
if any([np.allclose(gp, gc, rtol=rtol, atol=atol) for gc in grid_copy]):
continue
else:
grid_lat.append(gp)
gp_orbits = []
grid_copy = list(deepcopy(grid_lat))
if pointgroup is None:
pointgroup, translations = get_space_group(lat_vecs, atom_labels, atom_positions,
coords=atom_coords, rtol=rtol,
atol=atol, eps=eps)
else:
if pg_coords != "lat":
pointgroup = np.matmul(np.matmul(inv(lat_vecs), pointgroup), lat_vecs)
while len(grid_copy) != 0:
g = grid_copy.pop()
# Start a new orbit.
gp_orbits.append([g])
for pg in pointgroup:
# Check both the k-point that is moved back into the unit cell and one that
# isn't.
new_grid_point = np.dot(pg, g)
new_grid_point_car = np.dot(rlat_vecs, new_grid_point)
new_grid_point_cell = bring_into_cell(new_grid_point, rlat_vecs, rtol=rtol,
atol=atol, coords="lat")
new_gps = [new_grid_point_cell, new_grid_point]
# Add all unique k-points traversed by the orbit regardless of them
# belonging to the k-point grid.
if complete_orbit:
if unit_cell:
# Only include this point in the orbit if it is unique.
                    if not check_contained(new_gps[0], gp_orbits[-1], rtol=rtol,
atol=atol):
gp_orbits[-1].append(new_gps[0])
else:
# Add points to the orbit without translating them back into the first
# unit cell
                    if not check_contained(new_gps[1], gp_orbits[-1], rtol=rtol,
atol=atol):
gp_orbits[-1].append(new_gps[1])
else:
# If the new grid point is in the grid, remove it and add it to the
# orbit of grid point (g).
for new_gp in new_gps:
indices = find_point_indices(new_gp, grid_copy, rtol=rtol, atol=atol)
if len(indices) > 1:
msg = "There are duplicate points in the grid."
raise ValueError(msg)
elif len(indices) == 0:
continue
else:
gp_orbits[-1].append(new_gp)
del grid_copy[indices[0]]
orbit_wts = [len(orb) for orb in gp_orbits]
if kpt_coords == "Cart":
for i in range(len(gp_orbits)):
gp_orbits[i] = np.dot(rlat_vecs, np.array(gp_orbits[i]).T).T
return gp_orbits, orbit_wts
elif kpt_coords == "lat":
return gp_orbits, orbit_wts
else:
raise ValueError("Coordinate options are 'Cart' and 'lat'.")
# def find_full_orbitals(grid_car, lat_vecs, coord = "Cart", unitcell=False):
# """ Find the complete orbitals of the points in a grid, including points
# not contained in the grid.
# Args:
# grid_car (list): a list of grid point positions in cartesian coordinates.
# lat_vecs (numpy.ndarray): the vectors that define the integration cell
# coord (string): tell it to return the orbits in Cartesian ("cart",
# default) or lattice ("lat") coordinates.
# unitcell (string): return the points in the orbits in the first unit
# cell when true.
# Returns:
# gp_orbits (dict): the orbitals of the grid points in a dictionary.
# The keys of the dictionary are integer labels and the values are the
# grid points in the orbital.
# """
# grid_car = np.array(grid_car)
# grid_lat = (np.dot(inv(lat_vecs), grid_car.T).T).tolist()
# gp_orbits = {}
# nirr_kpts = 0
# grid_copy = deepcopy(grid_lat)
# pointgroup = find_point_group(lat_vecs)
# # To move an operator into lattice coordinates you have to take the product
# # L^(-1) O L where L is the lattice vectors and O is the operator.
# pointgroup = np.matmul(np.matmul(inv(lat_vecs), pointgroup), lat_vecs)
# while grid_copy != []:
# # Grap a point and build its orbit but only include points from the grid.
# gp = grid_copy.pop()
# nirr_kpts += 1
# gp_orbits[nirr_kpts] = []
# for pg in pointgroup:
# # If the group operation moves the point outside the cell, %1 moves
# # it back in.
# # I ran into floating point precision problems the last time I ran
# # %1. Just to be safe it's included here.
# # Move the k-point into the first unit cell is requested.
# if unitcell:
# new_gp = np.round(np.dot(pg, gp), 15)%1
# else:
# new_gp = np.round(np.dot(pg, gp), 15)
# if any([np.allclose(new_gp, gc) for gc in grid_copy]):
# ind = np.where(np.array([np.allclose(new_gp, gc) for gc in grid_copy]) == True)[0][0]
# del grid_copy[ind]
# gp_orbits[nirr_kpts].append(new_gp)
# else:
# gp_orbits[nirr_kpts].append(new_gp)
# continue
# if coord == "cart":
# for i in range(1, len(gp_orbits.keys()) + 1):
# for j in range(len(gp_orbits[i])):
# gp_orbits[i][j] = np.dot(lat_vecs, gp_orbits[i][j])
# return gp_orbits
# elif coord == "lat":
# return gp_orbits
# else:
# raise ValueError("Coordinate options are 'cell' and 'lat'.")
def find_lattice_type(centering_type, lattice_constants, lattice_angles):
"""Find the Bravais lattice type of the lattice.
Args:
centering_type (str): how points are centered in the conventional
unit cell of the lattice. Options include 'prim', 'base', 'body',
and 'face'.
lattice_constants (list or numpy.ndarray): the axial lengths
of the conventional lattice vectors.
lattice_angles (list or numpy.ndarray): the interaxial angles of the
conventional lattice vectors.
Returns:
(str): the Bravais lattice type.
Example:
        >>> centering_type = "prim"
>>> lattice_constants = [1]*3
>>> lattice_angles = [numpy.pi/2]*3
>>> lattice_type = find_lattice_type(centering_type,
lattice_constants,
lattice_angles)
"""
# Extract parameters.
a = float(lattice_constants[0])
b = float(lattice_constants[1])
c = float(lattice_constants[2])
alpha = float(lattice_angles[0])
beta = float(lattice_angles[1])
gamma = float(lattice_angles[2])
# Lattices with all angles = pi/2.
if (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
np.isclose(gamma, np.pi/2)):
# Check if it is a cubic lattice.
if (np.isclose(a,b) and np.isclose(b,c)):
if centering_type == "body":
return "body-centered cubic"
elif centering_type == "prim":
return "simple cubic"
elif centering_type == "face":
return "face-centered cubic"
else:
msg = ("Valid centering types for cubic lattices include "
"'prim', 'body', and 'face'.")
raise ValueError(msg.format(centering_type))
# Check if it is tetragonal.
elif (np.isclose(a,b) and not np.isclose(b,c)):
if centering_type == "prim":
return "tetragonal"
elif centering_type == "body":
return "body-centered tetragonal"
else:
msg = ("Valid centering types for tetragonal lattices include "
"'prim' and 'body'.")
raise ValueError(msg.format(centering_type))
# Check if it is orthorhombic
elif not (np.isclose(a,b) and np.isclose(b,c) and np.isclose(a,c)):
if centering_type == "body":
return "body-centered orthorhombic"
elif centering_type == "prim":
return "orthorhombic"
elif centering_type == "face":
return "face-centered orthorhombic"
elif centering_type == "base":
return "base-centered orthorhombic"
else:
msg = ("Valid centering types for orthorhombic lattices include "
"'prim', 'base', 'body', and 'face'.")
raise ValueError(msg.format(centering_type))
else:
            msg = ("All lattice angles are pi/2, but the lattice constants "
                   "provided do not correspond to a cubic, tetragonal, or "
                   "orthorhombic Bravais lattice.")
raise ValueError(msg.format(lattice_constants))
# Check if it is rhombohedral.
elif (np.isclose(alpha, beta) and np.isclose(beta, gamma)):
if (np.isclose(a, b) and np.isclose(b,c)):
if centering_type == "prim":
return "rhombohedral"
else:
msg = ("The only valid centering type for rhombohedral lattices "
"is 'prim'.")
raise ValueError(msg.format(centering_type))
else:
msg = ("All of the lattice constants should have the same value "
"for a rhombohedral lattice")
raise ValueError(msg.format(lattice_constants))
# Check if it is hexagonal.
elif (np.isclose(alpha, beta) and np.isclose(beta, np.pi/2) and
np.isclose(gamma, 2*np.pi/3)):
if (np.isclose(a, b) and not np.isclose(b, c)):
if centering_type == "prim":
return "hexagonal"
else:
msg = ("The only valid centering type for hexagonal lattices "
"is 'prim'.")
raise ValueError(msg.format(centering_type))
else:
msg = ("For a hexagonal lattice, a = b != c.")
raise ValueError(msg.format(lattice_constants))
# Check if it is monoclinic
# Monoclinic a,b <= c, alpha < pi/2, beta = gamma = pi/2, a != b != c
elif (np.isclose(beta, gamma) and np.isclose(beta, np.pi/2) and
(alpha < np.pi/2)):
if ((a < c or np.isclose(a, c)) and (b < c or np.isclose(b,c))):
if centering_type == "prim":
return "monoclinic"
elif centering_type == "base":
return "base-centered monoclinic"
else:
msg = ("Valid centering types for monoclinic lattices include "
"'prim' and 'base'.")
raise ValueError(msg.format(centering_type))
else:
msg = ("The lattice constants of a monoclinic lattice should be "
"arranged such that a, b <= c.")
raise ValueError(msg.format(lattice_constants))
# Check if the lattice is triclinic.
elif not (np.isclose(alpha, beta) and np.isclose(beta, gamma) and
np.isclose(alpha, gamma)):
if not (np.isclose(a, b) and np.isclose(b, c) and np.isclose(a, c)):
if centering_type == "prim":
return "triclinic"
else:
                msg = ("The only valid centering type for triclinic "
"lattices is 'prim'.")
raise ValueError(msg.format(centering_type))
else:
            msg = ("None of the lattice constants should be equivalent for a "
"triclinic lattice.")
raise ValueError(msg.format(lattice_constants))
else:
msg = ("The lattice angles provided do not correspond to any Bravais "
"lattice type.")
raise ValueError(msg.format(lattice_angles))
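# Hedged usage sketch for find_lattice_type (comments only; assumes numpy as np):
#     find_lattice_type("body", [1.0]*3, [np.pi/2]*3)           # "body-centered cubic"
#     find_lattice_type("base", [1.0, 2.0, 3.0], [np.pi/2]*3)   # "base-centered orthorhombic"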
# Find transformation to create HNF from integer matrix.
def get_minmax_indices(a):
"""Find the maximum and minimum elements of a list that aren't zero.
Args:
a (numpy.ndarray): a three element numpy array.
Returns:
minmax (list): the minimum and maximum values of array a with the
minimum first and maximum second.
"""
a = np.abs(a)
maxi = 2 - np.argmax(a[::-1])
min = 0
i = 0
while min == 0:
min = a[i]
i += 1
mini = i-1
for i,ai in enumerate(a):
if ai > 0 and ai < min:
min = ai
mini = i
return np.asarray([mini, maxi])
def swap_column(M, B, k):
"""Swap the column k with whichever column has the highest value (out of
the columns to the right of k in row k). The swap is performed for both
matrices M and B.
Args:
M (numpy.ndarray): the matrix being transformed
B (numpy.ndarray): a matrix to keep track of the transformation steps
on M.
k (int): the column to swap, as described in summary.
"""
Ms = deepcopy(M)
Bs = deepcopy(B)
    # Find the index of the largest element (in absolute value) in row k, at or to the right of column k.
maxidx = np.argmax(np.abs(Ms[k,k:])) + k
tmpCol = deepcopy(Bs[:,k]);
Bs[:,k] = Bs[:,maxidx]
Bs[:,maxidx] = tmpCol
tmpCol = deepcopy(Ms[:,k])
Ms[:,k] = Ms[:, maxidx]
Ms[:,maxidx] = tmpCol
return Ms, Bs
def swap_row(M, B, k):
"""Swap the row k with whichever row has the highest value (out of
the rows below k in column k). The swap is performed for both matrices M and B.
Args:
M (numpy.ndarray): the matrix being transformed
B (numpy.ndarray): a matrix to keep track of the transformation steps
on M.
        k (int): the row to swap, as described in summary.
"""
Ms = deepcopy(M)
Bs = deepcopy(B)
    # Find the index of the largest element (in absolute value) in column k, at or below row k.
maxidx = np.argmax(np.abs(Ms[k:,k])) + k
tmpCol = deepcopy(Bs[k,:]);
Bs[k,:] = Bs[maxidx,:]
Bs[maxidx,:] = tmpCol
tmpRow = deepcopy(Ms[k,:])
Ms[k,:] = Ms[maxidx,:]
Ms[maxidx,:] = tmpRow
return Ms, Bs
def HermiteNormalForm(S, eps=10):
"""Find the Hermite normal form (HNF) of a given integer matrix and the
matrix that mediates the transformation.
Args:
S (numpy.ndarray): The 3x3 integer matrix describing the relationship
between two commensurate lattices.
eps (int): finite precision parameter that determines number of decimals
kept when rounding.
Returns:
H (numpy.ndarray): The resulting HNF matrix.
B (numpy.ndarray): The transformation matrix such that H = SB.
"""
if np.linalg.det(S) == 0:
raise ValueError("Singular matrix passed to HNF routine")
B = np.identity(np.shape(S)[0]).astype(int)
H = deepcopy(S)
# Keep doing column operations until all elements in the first row are zero
# except for the one on the diagonal.
while np.count_nonzero(H[0,:]) > 1:
# Divide the column with the smallest value into the largest.
minidx, maxidx = get_minmax_indices(H[0,:])
minm = H[0,minidx]
# Subtract a multiple of the column containing the smallest element from
# the column containing the largest element.
multiple = int(H[0, maxidx]/minm)
H[:, maxidx] = H[:, maxidx] - multiple*H[:, minidx]
B[:, maxidx] = B[:, maxidx] - multiple*B[:, minidx]
if np.allclose(np.dot(S, B), H) == False:
raise ValueError("COLS: Transformation matrices didn't work.")
if H[0,0] == 0:
H, B = swap_column(H, B, 0) # Swap columns if (0,0) is zero.
if H[0,0] < 0:
H[:,0] = -H[:,0]
B[:,0] = -B[:,0]
if np.count_nonzero(H[0,:]) > 1:
raise ValueError("Didn't zero out the rest of the row.")
if np.allclose(np.dot(S, B), H) == False:
raise ValueError("COLSWAP: Transformation matrices didn't work.")
# Now work on element H[1,2].
while H[1,2] != 0:
if H[1,1] == 0:
tempcol = deepcopy(H[:,1])
H[:,1] = H[:,2]
H[:,2] = tempcol
tempcol = deepcopy(B[:,1])
B[:,1] = B[:,2]
B[:,2] = tempcol
if H[1,2] == 0:
break
if np.abs(H[1,2]) < np.abs(H[1,1]):
maxidx = 1
minidx = 2
else:
maxidx = 2
minidx = 1
multiple = int(H[1, maxidx]/H[1,minidx])
H[:,maxidx] = H[:, maxidx] - multiple*H[:,minidx]
B[:,maxidx] = B[:, maxidx] - multiple*B[:,minidx]
if np.allclose(np.dot(S, B), H) == False:
raise ValueError("COLS: Transformation matrices didn't work.")
if H[1,1] == 0:
tempcol = deepcopy(H[:,1])
H[:,1] = H[:,2]
H[:,2] = tempcol
if H[1,1] < 0: # change signs
H[:,1] = -H[:,1]
B[:,1] = -B[:,1]
if H[1,2] != 0:
raise ValueError("Didn't zero out last element.")
if np.allclose(np.dot(S,B), H) == False:
raise ValueError("COLSWAP: Transformation matrices didn't work.")
if H[2,2] < 0: # change signs
H[:,2] = -H[:,2]
B[:,2] = -B[:,2]
check1 = (np.array([0,0,1]), np.array([1,2,2]))
if np.count_nonzero(H[check1]) != 0:
raise ValueError("Not lower triangular")
if np.allclose(np.dot(S, B), H) == False:
raise ValueError("End Part1: Transformation matrices didn't work.")
# Now that the matrix is in lower triangular form, make sure the lower
# off-diagonal elements are non-negative but less than the diagonal
# elements.
while H[1,1] <= H[1,0] or H[1,0] < 0:
if H[1,1] <= H[1,0]:
multiple = 1
else:
multiple = -1
H[:,0] = H[:,0] - multiple*H[:,1]
B[:,0] = B[:,0] - multiple*B[:,1]
for j in [0,1]:
while H[2,2] <= H[2,j] or H[2,j] < 0:
if H[2,2] <= H[2,j]:
multiple = 1
else:
multiple = -1
H[:,j] = H[:,j] - multiple*H[:,2]
B[:,j] = B[:,j] - multiple*B[:,2]
if np.allclose(np.dot(S, B), H) == False:
raise ValueError("End Part1: Transformation matrices didn't work.")
if np.count_nonzero(H[check1]) != 0:
raise ValueError("Not lower triangular")
check2 = (np.asarray([0, 1, 1, 2, 2, 2]), np.asarray([0, 0, 1, 0, 1, 2]))
if any(H[check2] < 0) == True:
raise ValueError("Negative elements in lower triangle.")
if H[1,0] > H[1,1] or H[2,0] > H[2,2] or H[2,1] > H[2,2]:
raise ValueError("Lower triangular elements bigger than diagonal.")
H = np.round(H, eps).astype(int)
return H, B
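# Hedged usage sketch for HermiteNormalForm (comments only; assumes numpy as np):
#     S = np.array([[2, 0, 0], [0, 2, 0], [1, 0, 2]])
#     H, B = HermiteNormalForm(S)
#     # H is lower triangular with H = np.dot(S, B), as verified inside the routine.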
def UpperHermiteNormalForm(S):
"""Find the Hermite normal form (HNF) of a given integer matrix and the
matrix that mediates the transformation.
Args:
S (numpy.ndarray): The 3x3 integer matrix describing the relationship
between two commensurate lattices.
Returns:
H (numpy.ndarray): The resulting HNF matrix.
B (numpy.ndarray): The transformation matrix such that H = SB.
"""
if np.linalg.det(S) == 0:
raise ValueError("Singular matrix passed to HNF routine")
B = np.identity(np.shape(S)[0]).astype(int)
H = deepcopy(S)
# Keep doing row operations until all elements in the first column are zero
# except for the one on the diagonal.
while np.count_nonzero(H[:,0]) > 1:
# Divide the row with the smallest value into the largest.
minidx, maxidx = get_minmax_indices(H[:,0])
minm = H[minidx,0]
# Subtract a multiple of the row containing the smallest element from
# the row containing the largest element.
multiple = int(H[maxidx,0]/minm)
H[maxidx,:] = H[maxidx,:] - multiple*H[minidx,:]
B[maxidx,:] = B[maxidx,:] - multiple*B[minidx,:]
if np.allclose(np.dot(B, S), H) == False:
raise ValueError("ROWS: Transformation matrices didn't work.")
if H[0,0] == 0:
H, B = swap_row(H, B, 0) # Swap rows if (0,0) is zero.
if H[0,0] < 0:
H[0,:] = -H[0,:]
B[0,:] = -B[0,:]
if np.count_nonzero(H[:,0]) > 1:
        raise ValueError("Didn't zero out the rest of the column.")
if np.allclose(np.dot(B,S), H) == False:
raise ValueError("ROWSWAP: Transformation matrices didn't work.")
# Now work on element H[2,1].
while H[2,1] != 0:
if H[1,1] == 0:
temprow = deepcopy(H[1,:])
H[1,:] = H[2,:]
H[2,:] = temprow
temprow = deepcopy(B[1,:])
B[1,:] = B[2,:]
B[2,:] = temprow
break
if np.abs(H[2,1]) < np.abs(H[1,1]):
maxidx = 1
minidx = 2
else:
maxidx = 2
minidx = 1
multiple = int(H[maxidx,1]/H[minidx,1])
H[maxidx,:] = H[maxidx,:] - multiple*H[minidx,:]
B[maxidx,:] = B[maxidx,:] - multiple*B[minidx,:]
if np.allclose(np.dot(B,S), H) == False:
raise ValueError("COLS: Transformation matrices didn't work.")
if H[1,1] == 0:
temprow = deepcopy(H[1,:])
H[1,:] = H[0,:]
H[0,:] = temprow
if H[1,1] < 0: # change signs
H[1,:] = -H[1,:]
B[1,:] = -B[1,:]
if H[1,0] != 0:
raise ValueError("Didn't zero out last element.")
if np.allclose(np.dot(B,S), H) == False:
raise ValueError("COLSWAP: Transformation matrices didn't work.")
if H[2,2] < 0: # change signs
H[2,:] = -H[2,:]
B[2,:] = -B[2,:]
check1 = (np.array([2,2,1]), np.array([1,0,0]))
if np.count_nonzero(H[check1]) != 0:
        raise ValueError("Not upper triangular")
if np.allclose(np.dot(B,S), H) == False:
raise ValueError("End Part1: Transformation matrices didn't work.")
    # Now that the matrix is in upper triangular form, make sure the upper
    # off-diagonal elements are non-negative but less than the diagonal
    # elements.
while H[1,1] <= H[0,1] or H[0,1] < 0:
if H[1,1] <= H[0,1]:
multiple = 1
else:
multiple = -1
H[0,:] = H[0,:] - multiple*H[1,:]
B[0,:] = B[0,:] - multiple*B[1,:]
for j in [0,1]:
while H[2,2] <= H[j,2] or H[j,2] < 0:
if H[2,2] <= H[j,2]:
multiple = 1
else:
multiple = -1
H[j,:] = H[j,:] - multiple*H[2,:]
B[j,:] = B[j,:] - multiple*B[2,:]
if np.allclose(np.dot(B, S), H) == False:
raise ValueError("End Part1: Transformation matrices didn't work.")
    if np.count_nonzero(H[check1]) != 0:
        raise ValueError("Not upper triangular")
    check2 = (np.asarray([0, 0, 0, 1, 1, 2]), np.asarray([0, 1, 2, 1, 2, 2]))
    if any(H[check2] < 0) == True:
        raise ValueError("Negative elements in upper triangle.")
    if H[0,1] > H[1,1] or H[0,2] > H[2,2] or H[1,2] > H[2,2]:
        raise ValueError("Upper triangular elements bigger than diagonal.")
return H, B
def find_kpt_index(kpt, invK, L, D, eps=4):
"""This function takes a k-point in Cartesian coordinates and "hashes" it
into a single number, corresponding to its place in the k-point list.
Args:
kpt (list or numpy.ndarray): the k-point in Cartesian coordinates
invK(list or numpy.ndarray): the inverse of the k-point grid generating
vectors
L (list or numpy.ndarray): the left transform for the SNF conversion
D (list or numpy.ndarray): the diagonal of the SNF
        eps (int): a finite-precision parameter giving the number of decimals
            kept when rounding k-points converted from Cartesian to grid coordinates.
Returns:
_ (int): the unique index of the k-point in the first unit cell
"""
# Put the k-point in grid coordinates.
gpt = np.round(np.dot(invK, kpt), eps)
gpt = np.dot(L, gpt).astype(int)%D
    # Convert from group coordinates to a single, base-10 number between 0 and
    # the number of k-points in the unreduced grid minus 1.
    return gpt[0]*D[1]*D[2] + gpt[1]*D[2] + gpt[2]
def bring_into_cell(points, rlat_vecs, rtol=1e-5, atol=1e-8, coords="Cart",
centered=False):
"""Bring a point or list of points into the first unit cell.
Args:
        points (list or numpy.ndarray): a point or list of points in three space in
            Cartesian coordinates.
        rlat_vecs (numpy.ndarray): the lattice generating vectors as columns of a 3x3
            array.
        coords (str): the coordinate system of the returned points, either "Cart"
            (Cartesian, the default) or "lat" (lattice).
        centered (bool): if True, map points into the unit cell centered on the
            origin rather than the cell with its corner at the origin.
Returns:
_ (numpy.ndarray): a point or list of points in three space inside the first
unit cell in Cartesian (default) or lattice coordinates.
"""
# Convert to lattice coordinates and move points into the first unit cell.
points = np.array(points)
lat_points = np.dot(inv(rlat_vecs), points.T).T%1
# Special care has to be taken for points near 1.
lat_points[np.isclose(lat_points, 1, rtol=rtol, atol=atol)] = 0
# Shift points again if the unit cell is centered at the origin.
if centered:
lat_points[lat_points > 0.5] = lat_points[lat_points > 0.5] - 1
# Special care has to be taken for points near 1/2.
lat_points[np.isclose(lat_points, 0.5, rtol=rtol, atol=atol)] = -0.5
# Convert back to Cartesian coordinates.
if coords == "Cart":
return np.dot(rlat_vecs, lat_points.T).T
elif coords == "lat":
return lat_points
else:
msg = "Coordinate options include 'Cart' and 'lat'."
raise ValueError(msg)
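# Hedged usage sketch for bring_into_cell (comments only; assumes numpy as np):
#     rlat_vecs = np.eye(3)
#     bring_into_cell([1.25, -0.5, 0.75], rlat_vecs)  # -> [0.25, 0.5, 0.75]
#     bring_into_cell([1.25, -0.5, 0.75], rlat_vecs, coords="lat", centered=True)
#     # -> [0.25, -0.5, -0.25] in lattice coordinates of the origin-centered cell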
def reduce_kpoint_list(kpoint_list, lattice_vectors, grid_vectors, shift,
eps=9, rtol=1e-5, atol=1e-8):
"""Use the point group symmetry of the lattice vectors to reduce a list of
k-points.
Args:
kpoint_list (list or numpy.ndarray): a list of k-point positions in
Cartesian coordinates.
lattice_vectors (list or numpy.ndarray): the vectors that generate the
reciprocal lattice in a 3x3 array with the vectors as columns.
grid_vectors (list or numpy.ndarray): the vectors that generate the
k-point grid in a 3x3 array with the vectors as columns in
Cartesian coordinates.
shift (list or numpy.ndarray): the offset of the k-point grid in grid
coordinates.
Returns:
reduced_kpoints (list): an ordered list of irreducible k-points
kpoint_weights (list): an ordered list of irreducible k-point weights.
"""
try:
inv(lattice_vectors)
except np.linalg.linalg.LinAlgError:
msg = "The lattice generating vectors are linearly dependent."
raise ValueError(msg.format(lattice_vectors))
try:
inv(grid_vectors)
except np.linalg.linalg.LinAlgError:
msg = "The grid generating vectors are linearly dependent."
raise ValueError(msg.format(lattice_vectors))
if abs(det(lattice_vectors)) < abs(det(grid_vectors)):
msg = """The k-point generating vectors define a grid with a unit cell
larger than the reciprocal lattice unit cell."""
raise ValueError(msg.format(grid_vectors))
# Put the shift in Cartesian coordinates.
shift = np.dot(grid_vectors, shift)
# Check that the lattice and grid vectors are commensurate.
check, N = check_commensurate(grid_vectors, lattice_vectors, rtol=rtol, atol=atol)
if not check:
msg = "The lattice and grid vectors are incommensurate."
raise ValueError(msg.format(grid_vectors))
# Find the HNF of N. B is the transformation matrix (BN = H).
H,B = HermiteNormalForm(N)
H = [list(H[i]) for i in range(3)]
# Find the SNF of H. L and R are the left and right transformation matrices
# (LHR = S).
S,L,R = SmithNormalForm(H)
# Get the diagonal of SNF.
D = np.round(np.diag(S), eps).astype(int)
cOrbit = 0 # unique orbit counter
pointgroup = find_point_group(lattice_vectors) # a list of point group operators
nSymOps = len(pointgroup) # the number of symmetry operations
nUR = len(kpoint_list) # the number of unreduced k-points
# A dictionary to keep track of the number of symmetrically-equivalent
# k-points.
hashtable = dict.fromkeys(range(nUR))
    # A dictionary to keep track of the k-points that represent each orbit.
iFirst = {}
# A dictionary to keep track of the number of symmetrically-equivalent
# k-points in each orbit.
iWt = {}
invK = inv(grid_vectors)
# Loop over unreduced k-points.
for i in range(nUR):
ur_kpt = kpoint_list[i]
idx = find_kpt_index(ur_kpt - shift, invK, L, D, eps)
        if hashtable[idx] is not None:
continue
cOrbit += 1
hashtable[idx] = cOrbit
iFirst[cOrbit] = i
iWt[cOrbit] = 1
for pg in pointgroup:
# Rotate the k-point.
rot_kpt = np.dot(pg, ur_kpt)
# Bring it back into the first unit cell.
rot_kpt = bring_into_cell(rot_kpt, lattice_vectors)
if not np.allclose(np.dot(invK, rot_kpt-shift),
np.round(np.dot(invK, rot_kpt-shift))):
continue
idx = find_kpt_index(rot_kpt - shift, invK, L, D, eps)
            if hashtable[idx] is None:
hashtable[idx] = cOrbit
iWt[cOrbit] += 1
    weight_sum = 0
    kpoint_weights = list(iWt.values())
    reduced_kpoints = []
    for i in range(cOrbit):
        weight_sum += kpoint_weights[i]
        reduced_kpoints.append(kpoint_list[iFirst[i+1]])
    if weight_sum != nUR:
        msg = "The number of k-points is not preserved by the symmetry reduction."
        raise ValueError(msg)
return reduced_kpoints, kpoint_weights
def find_orbits(kpoint_list, lattice_vectors, rlattice_vectors, grid_vectors, shift,
atom_labels, atom_positions, full_orbit=False, kpt_coords="cart",
atom_coords="lat", eps=1e-10, rounding_eps=4, rtol=1e-4, atol=1e-6):
"""Use the point group symmetry of the lattice vectors to reduce a list of
k-points.
Args:
kpoint_list (list or numpy.ndarray): a list of k-point positions in
Cartesian coordinates.
lattice_vectors (list or numpy.ndarray): the vectors that generate the
lattice in a 3x3 array with the vectors as columns in Cartesian coordinates.
rlattice_vectors (list or numpy.ndarray): the vectors that generate the
reciprocal lattice in a 3x3 array with the vectors as columns in Cartesian
coordinates.
grid_vectors (list or numpy.ndarray): the vectors that generate the
k-point grid in a 3x3 array with the vectors as columns in
Cartesian coordinates.
shift (list or numpy.ndarray): the offset of the k-point grid in grid
coordinates.
        atom_labels (list): a list of atom labels. Each label should be distinct for each
            atomic species. The labels must start at zero and should be in the same order
            as the atomic basis.
atom_positions (list or numpy.ndarray): a list of atomic positions in Cartesian
(default) or lattice coordinates.
full_orbit (bool): if true, return the orbits with the list of k-points from
`kpoint_list`.
        kpt_coords (str): a string that indicates the coordinate system of the returned
            k-points. It can be Cartesian ("cart") or lattice ("lat").
        atom_coords (str): a string that indicates the coordinate system of the atom
            positions. It can be Cartesian ("cart") or lattice ("lat").
eps (float): a finite precision parameter that is added to the norms of points in
`search_sphere`.
rounding_eps (int): a finite precision parameter that determines the number of
decimals kept when rounding.
rtol (float): a relative tolerance used when finding if two k-points are
equivalent.
atol (float): an absolute tolerance used when finding if two k-points are
equivalent.
Returns:
reduced_kpoints (list): an ordered list of irreducible k-points. If full_orbit
is True, return `orbits_list`, a list of all k-points in each orbit.
orbit_weights (list): an ordered list of the number of k-points in each orbit.
"""
try:
inv(lattice_vectors)
except np.linalg.linalg.LinAlgError:
msg = "The lattice generating vectors are linearly dependent."
raise ValueError(msg.format(lattice_vectors))
try:
inv(rlattice_vectors)
except np.linalg.linalg.LinAlgError:
msg = "The reciprocal lattice generating vectors are linearly dependent."
raise ValueError(msg.format(rlattice_vectors))
try:
inv(grid_vectors)
except np.linalg.linalg.LinAlgError:
msg = "The grid generating vectors are linearly dependent."
raise ValueError(msg.format(rlattice_vectors))
    if abs(det(rlattice_vectors)) + atol < abs(det(grid_vectors)):
msg = """The k-point generating vectors define a grid with a unit cell
larger than the reciprocal lattice unit cell."""
raise ValueError(msg.format(grid_vectors))
# Put the shift in Cartesian coordinates.
shift = np.dot(grid_vectors, shift)
# Verify the grid and lattice are commensurate.
check, N = check_commensurate(grid_vectors, rlattice_vectors, rtol=rtol, atol=atol)
if not check:
msg = "The lattice and grid vectors are incommensurate."
raise ValueError(msg.format(grid_vectors))
# Find the HNF of N. B is the transformation matrix (BN = H).
H,B = HermiteNormalForm(N)
H = [list(H[i]) for i in range(3)]
# Find the SNF of H. L and R are the left and right transformation matrices
# (LHR = S).
S,L,R = SmithNormalForm(H)
# Get the diagonal of SNF.
D = np.round(np.diag(S), rounding_eps).astype(int)
# Unique orbit counter
cOrbit = 0
# A list of point group operators
pointgroup, translations = get_space_group(lattice_vectors, atom_labels,
atom_positions, coords=atom_coords,
rtol=rtol, atol=atol, eps=eps)
# The number of symmetry operations
nSymOps = len(pointgroup)
# The number of unreduced k-points
nUR = len(kpoint_list)
# A dictionary to keep track of the number of symmetrically-equivalent
# k-points. It goes from the k-point index (the one not associated with the k-point's
# position in `kpoint_list`) to the label of that k-point's orbit.
hashtable = dict.fromkeys(range(nUR))
    # A dictionary to keep track of the k-points that represent each orbit.
# It goes from orbit label to the index of the k-point that represents
# the orbit.
iFirst = {}
# A dictionary to keep track of the number of symmetrically-equivalent
# k-points in each orbit.
iWt = {}
# A dictionary for converting between k-point indices. The keys are the k-point
# indices associated with the k-point's components. The values are the k-point
# indices associated with the k-point's location in `kpoint_list`.
kpt_index_conv = {}
invK = inv(grid_vectors)
# Loop over unreduced k-points.
for i in range(nUR):
# Grab an unreduced k-point.
ur_kpt = kpoint_list[i]
# Find the unreduced k-point's index.
kpt_hash = find_kpt_index(ur_kpt - shift, invK, L, D, rounding_eps)
kpt_index_conv[kpt_hash] = i
# If it has already been looked at because it was part of the orbit of a
# previous k-point, skip it.
        if hashtable[kpt_hash] is not None:
continue
# If this k-point hasn't already been looked at, increment the orbit counter.
cOrbit += 1
# Add this k-point to the hashtable.
hashtable[kpt_hash] = cOrbit
# Make it the representative k-point for this orbit.
iFirst[cOrbit] = i
# Initialize the weight of this orbit.
iWt[cOrbit] = 1
# Loop through the point group operators.
for pg in pointgroup:
# Rotate the k-point.
rot_kpt = np.dot(pg, ur_kpt)
# Bring it back into the first unit cell.
rot_kpt = bring_into_cell(rot_kpt, rlattice_vectors)
# Verify that this point is part of the grid. If not, discard it.
if not np.allclose(np.dot(invK, rot_kpt-shift),
np.round(np.dot(invK, rot_kpt-shift)), rtol=rtol):
continue
# Find the index of the rotated k-point.
kpt_hash = find_kpt_index(rot_kpt - shift, invK, L, D, rounding_eps)
# If this k-point hasn't been hit during the traversal of the orbit,
# add the rotated k-point to this orbit and increment the orbit's weight.
            if hashtable[kpt_hash] is None:
hashtable[kpt_hash] = cOrbit
iWt[cOrbit] += 1
# Remove empty entries from hashtable.
hashtable = dict((k, v) for k, v in hashtable.items() if v)
# Find the reduced k-points from the indices of the representative k-points in
# iFirst.
kpoint_list = np.array(kpoint_list)
# test_indices = [kpt_index_conv[i] for i in list(iFirst.values())]
# reduced_kpoints = kpoint_list[test_indices]
reduced_kpoints = kpoint_list[list(iFirst.values())]
orbit_weights = list(iWt.values())
if full_orbit:
# A nested list that will eventually contain the k-points in each orbit.
orbit_list = [[] for _ in range(max(list(hashtable.values())))]
kpoint_index_list = list(hashtable.keys())
# Put the index of the k-points in orbit_list.
for kpt_i in kpoint_index_list:
orbit_list[hashtable[kpt_i]-1].append(kpt_i)
# Replace the indices by the actual k-points.
for i,orbit in enumerate(orbit_list):
for j,kpt_index in enumerate(orbit):
kpt = kpoint_list[kpt_index_conv[kpt_index]]
# Put points in lattice coordinates if option provided.
if kpt_coords == "lat":
kpt = np.dot(inv(rlattice_vectors), kpt)
orbit_list[i][j] = kpt
return orbit_list, orbit_weights
else:
if kpt_coords == "lat":
reduced_kpoints = np.dot(inv(rlattice_vectors), reduced_kpoints.T).T
return reduced_kpoints, orbit_weights
# def minkowski_reduce_basis(basis, eps):
# """Find the Minkowski representation of a basis.
# Args:
# basis(numpy.ndarray): a matrix with the generating vectors as columns.
# eps (int): a finite precision parameter in 10**(-eps).
# """
# if type(eps) != int:
# msg = ("eps must be an integer, cooresponds to 10**-eps")
# raise ValueError(msg.format(eps))
# return _minkowski_reduce_basis(basis.T, 10**(-eps)).T
def just_map_to_bz(grid, rlattice_vectors, coords="Cart", rtol=1e-4, atol=1e-6, eps=1e-10):
"""Map a grid into the first Brillouin zone in the Minkowski basis.
Args:
grid (list): a list of k-points in Cartesian coordinates.
        rlattice_vectors (numpy.ndarray): a matrix whose columns are the
            reciprocal lattice generating vectors.
coords (string): the coordinates of the returned k-points. Options include
"Cart" for Cartesian and "lat" for lattice.
        eps (float): a finite precision tolerance used when reducing the reciprocal lattice basis.
Returns:
reduced_grid (numpy.ndarray): a numpy array of grid points in the first
Brillouin zone in Minkowski space.
weights (numpy.ndarray): the k-point weights
"""
# Find the Minkowski basis.
mink_basis = minkowski_reduce_basis(rlattice_vectors, rtol=rtol, atol=atol, eps=eps)
# Initialize the grid that will be mapped to the BZ.
new_grid = []
# Loop over all points in the grid.
for i, pt in enumerate(grid):
# Move each point into the first unit cell.
pt = bring_into_cell(pt, mink_basis, rtol=rtol, atol=atol)
# Put the point in lattice coordinates.
pt_lat = np.dot(inv(rlattice_vectors), pt)
# Find the translationally equivalent points in the eight unit cells that
# share a vertex at the origin in lattice coordinates.
pts_lat = np.array([pt_lat + shift for shift in product([-1,0], repeat=3)])
# Put the translationally equivalent points in Cartesian coordinates.
pts = np.dot(rlattice_vectors, pts_lat.T).T
# Keep the point that is the closest to the origin.
new_grid.append(pts[np.argmin(norm(pts, axis=1))])
new_grid = np.array(new_grid)
if coords == "lat":
new_grid = np.dot(inv(rlattice_vectors), new_grid.T).T
return new_grid
elif coords == "Cart":
return new_grid
else:
msg = "Coordinate options include 'Cart' and 'lat'."
raise ValueError(msg)
def map_to_bz(grid, lattice_vectors, rlattice_vectors, grid_vectors, shift, atom_labels,
atom_positions, rtol=1e-5, atol=1e-8, eps=1e-10):
"""Map a grid into the first Brillouin zone in Minkowski space.
Args:
grid (list): a list of grid points in Cartesian coordinates.
        rlattice_vectors (numpy.ndarray): a matrix whose columns are the
            reciprocal lattice generating vectors.
grid_vectors (numpy.ndarray): the grid generating vectors.
shift (list or numpy.ndarray): the offset of the k-point grid in grid
coordinates.
        eps (float): a finite precision tolerance added to the norms of vectors
            when comparing lengths.
Returns:
reduced_grid (numpy.ndarray): a numpy array of grid points in the first
Brillouin zone in Minkowski space.
weights (numpy.ndarray): the k-point weights
"""
# Reduce the grid and move into the unit cell.
# reduced_grid, weights = reduce_kpoint_list(grid, rlattice_vectors, grid_vectors,
# shift, eps)
# Uncomment this when symmetry reduction is fixed.
reduced_grid, weights = find_orbits(grid, lattice_vectors, rlattice_vectors,
grid_vectors, shift, atom_labels, atom_positions)
reduced_grid = np.array(reduced_grid)
# Find the Minkowski basis.
mink_basis = minkowski_reduce_basis(rlattice_vectors, rtol=rtol, atol=atol, eps=eps)
reduced_grid_copy = deepcopy(reduced_grid)
for i, pt1 in enumerate(reduced_grid_copy):
pt1 = bring_into_cell(pt1, mink_basis)
norm_pt1 = np.dot(pt1, pt1)
reduced_grid[i] = pt1
for n in product([-1,0], repeat=3):
pt2 = pt1 + np.dot(mink_basis, n)
norm_pt2 = np.dot(pt2, pt2)
if (norm_pt2 + eps) < norm_pt1:
norm_pt1 = norm_pt2
reduced_grid[i] = pt2
return reduced_grid, weights
def number_of_point_operators(lattice_type):
"""Return the number of point group operators for the provided lattice type.
Args:
lattice_type (str): the Bravais lattice.
Returns:
(int): the number of point group symmetry operators for the Bravais lattice.
"""
num_operators = [2, 4, 8, 12, 16, 24, 48]
lat_types = ['triclinic', 'monoclinic', 'orthorhombic', 'rhombohedral', 'tetragonal',
'hexagonal', 'cubic']
lat_dict = {i:j for i,j in zip(lat_types, num_operators)}
try:
return lat_dict[lattice_type]
    except KeyError:
msg = ("Please provide a Bravais lattice, excluding atom centering, such as "
"'cubic' or 'hexagonal'.")
raise ValueError(msg.format(lattice_type))
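# Illustrative sketch (not part of the original module): number_of_point_operators is
# a plain lookup, so a cubic Bravais lattice should report the 48 operations of the
# full octahedral group. The helper name below is made up.
def _example_number_of_point_operators():
    return number_of_point_operators('cubic')  # expected: 48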
def search_sphere(lat_vecs, eps=1e-9):
"""Find all the lattice points within a sphere whose radius is the same
as the length of the longest lattice vector.
Args:
lat_vecs (numpy.ndarray): the lattice vectors as columns of a 3x3 array.
eps (float): finite precision tolerance used when comparing norms of points.
Returns:
sphere_points (numpy.ndarray): a 1D array of points within the sphere.
"""
a0 = lat_vecs[:,0]
a1 = lat_vecs[:,1]
a2 = lat_vecs[:,2]
    # Let's orthogonalize the lattice vectors by removing parallel components.
a0_hat = a0/norm(a0)
a1_hat = a1/norm(a1)
a1p = a1 - np.dot(a1, a0_hat)*a0_hat
a1p_hat = a1p/norm(a1p)
a2p = a2 - np.dot(a2, a1p_hat)*a1p_hat - np.dot(a2, a0_hat)*a0_hat
max_norm = max(norm(lat_vecs, axis=0))
max_indices = [int(np.ceil(max_norm/norm(a0) + eps)), int(np.ceil(max_norm/norm(a1p) + eps)),
int(np.ceil(max_norm/norm(a2p) + eps))]
imin = -max_indices[0]
imax = max_indices[0] + 1
jmin = -max_indices[1]
jmax = max_indices[1] + 1
kmin = -max_indices[2]
kmax = max_indices[2] + 1
sphere_pts = []
for i,j,k in it.product(range(imin, imax), range(jmin, jmax), range(kmin, kmax)):
pt = np.dot(lat_vecs, [i,j,k])
if (np.dot(pt, pt) - eps) < max_norm**2:
sphere_pts.append(pt)
return np.array(sphere_pts)
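# Illustrative sketch (not part of the original module): for a simple cubic lattice the
# sphere of radius |a| about the origin should contain the origin plus its six nearest
# neighbors, i.e. seven lattice points. Assumes numpy is available as np.
def _example_search_sphere():
    return len(search_sphere(np.eye(3)))  # expected: 7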
def get_point_group(lat_vecs, rtol = 1e-4, atol=1e-6, eps=1e-9):
"""Get the point group of a lattice.
Args:
lat_vecs (numpy.ndarray): a 3x3 array with the lattice vectors as columns.
rtol (float): relative tolerance for floating point comparisons.
        atol (float): absolute tolerance for floating point comparisons.
eps (float): finite precision parameter for identifying points within a sphere.
Returns:
point_group (numpy.ndarray): a list of rotations, reflections and improper
rotations.
"""
pts = search_sphere(lat_vecs, eps)
a1 = lat_vecs[:,0]
a2 = lat_vecs[:,1]
a3 = lat_vecs[:,2]
inv_lat_vecs = inv(lat_vecs)
point_group = []
i = 0
for p1,p2,p3 in it.permutations(pts, 3):
# In a unitary transformation, the length of the vectors will be
# preserved.
if (np.isclose(np.dot(p1,p1), np.dot(a1,a1), rtol=rtol, atol=atol) and
np.isclose(np.dot(p2,p2), np.dot(a2,a2), rtol=rtol, atol=atol) and
np.isclose(np.dot(p3,p3), np.dot(a3,a3), rtol=rtol, atol=atol)):
new_lat_vecs = np.transpose([p1,p2,p3])
# The volume of a parallelepiped given by the new basis should
# be the same.
if np.isclose(abs(det(new_lat_vecs)), abs(det(lat_vecs)), rtol=rtol,
atol=atol):
op = np.dot(new_lat_vecs, inv_lat_vecs)
# Check that the rotation, reflection, or improper rotation
# is an orthogonal matrix.
if np.allclose(np.eye(3), np.dot(op, op.T), rtol=rtol, atol=atol):
# Make sure this operator is unique.
if not check_contained([op], point_group, rtol=rtol, atol=atol):
point_group.append(op)
return point_group
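# Illustrative sketch (not part of the original module): the point group of a simple
# cubic lattice is the full octahedral group, so get_point_group should return 48
# orthogonal operators. Assumes numpy is available as np; the helper name is made up.
def _example_get_point_group():
    cubic = np.eye(3)
    return len(get_point_group(cubic))  # expected: 48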
def get_space_group(lattice_vectors, atom_labels, atom_positions, coords="lat",
rtol=1e-4, atol=1e-6, eps=1e-10):
"""Get the space group (point group and fractional translations) of a crystal.
Args:
lattice_vectors (list or numpy.ndarray): the lattice vectors, in Cartesian
coordinates, as columns of a 3x3 array.
        atom_labels (list): a list of atom labels. Each label should be distinct for each
            atomic species. The labels can be of type string or integer but must be in the
            same order as the atomic basis.
atom_positions (list or numpy.ndarray): a list of atom positions in Cartesian
(default) or lattice coordinates.
        coords (str): specifies the coordinate system of the atomic basis. Anything other
than "lat", for lattice coordinates, will default to Cartesian.
rtol (float): relative tolerance for finding point group.
atol (float): absolute tolerance for finding point group.
eps (float): finite precision parameter used when finding points within a sphere.
Returns:
point_group (list): a list of point group operations.
translations (list): a list of translations.
"""
def check_atom_equivalency(atom_label_i, atom_position_i, atom_labels, atom_positions):
"""Check if an atom is equivalent to another atom in a list of atoms. Two atoms
are equivalent if they have the same label and are located at the same position.
Args:
atom_label_i (int): the label of the atom being compared.
atom_position_i (list or numpy.ndarray): the position of the atom being
compared in 3-space.
atom_labels (list or numpy.ndarray): a list of atom labels.
atom_positions (list or numpy.ndarray): a list of atom positions in 3-space.
Returns:
_ (bool): return `True` if the atom is equivalent to an atom in the list of
atoms.
"""
# Check to see if this atom that was rotated, translated, and brought
# into the unit cell is equivalent to one of the other atoms in the atomic
# basis.
# Find the location of the atom
label_index = find_point_indices([atom_label_i], atom_labels)
position_index = find_point_indices(atom_position_i, atom_positions)
if check_contained(position_index, label_index):
return True
else:
return False
# Initialize the point group and fractional translations subgroup.
point_group = []
translations = []
atomic_basis = deepcopy(np.array(atom_positions))
# Put atomic positions in Cartesian coordinates if necessary.
if coords == "lat":
atomic_basis = np.dot(lattice_vectors, atomic_basis.T).T
# Bring the atom's positions into the first unit cell.
atomic_basis = np.array([bring_into_cell(ab, lattice_vectors, rtol=rtol, atol=atol)
for ab in atomic_basis])
# Get the point group of the lattice.
lattice_pointgroup = get_point_group(lattice_vectors, rtol=rtol, atol=atol, eps=eps)
# The possible translations are between atoms of the same type. The translations
# between atoms of *one* type will be, in every case, a *superset* of all translations
# that may be in the spacegroup. We'll generate this superset of translations and keep
# only those that are valid for all atom types.
# Grab the type and position of the first atom.
first_atom_type = atom_labels[0]
first_atom_pos = atomic_basis[0]
# Loop through the point group operators of the parent lattice.
for lpg in lattice_pointgroup:
# Rotate the first atom.
rot_first_atom_pos = np.dot(lpg, first_atom_pos)
# Loop over all the atoms.
for atom_type_i,atom_pos_i in zip(atom_labels, atomic_basis):
            # If the atoms are different types, move on to the next atom.
if first_atom_type != atom_type_i:
continue
# Calculate the vector that points from the first atom's rotated position to
# this atom's position and then move it into the first unit cell. This is one
# of the translations in the superset of fractional translations for the first
# atom type.
frac_trans = bring_into_cell(atom_pos_i - rot_first_atom_pos, lattice_vectors,
rtol=rtol, atol=atol)
# Verify that this rotation and fractional translation map each atom onto
            # another atom of the same type.
for atom_type_j,atom_pos_j in zip(atom_labels, atomic_basis):
# Rotate, translate, and then bring this atom into the unit cell in the
# first unit cell.
rot_atom_pos_j = bring_into_cell(np.dot(lpg, atom_pos_j) + frac_trans,
lattice_vectors, rtol=rtol, atol=atol)
# Check to see if this atom that was rotated, translated, and brought
# into the unit cell is equivalent to one of the other atoms in the atomic
# basis.
equivalent = check_atom_equivalency(atom_type_j, rot_atom_pos_j,
atom_labels, atomic_basis)
# If this atom isn't equivalent to one of the others, it isn't a valid
# rotation + translation.
if not equivalent:
break
# If all the atoms get mapped onto atoms of their same type, add this
# translation and rotation to the space group.
# print(equivalent)
if equivalent:
point_group.append(lpg)
translations.append(frac_trans)
return point_group, translations
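# Illustrative sketch (not part of the original module): for a simple cubic crystal with
# a single atom at the origin, the space group should be the 48 lattice point operations
# paired with zero fractional translations. Assumes numpy is available as np; the labels
# and positions below are made up.
def _example_get_space_group():
    lattice = np.eye(3)
    point_group, translations = get_space_group(lattice, [0], [[0.0, 0.0, 0.0]])
    return len(point_group), len(translations)  # expected: (48, 48)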
def equivalent_orbits(orbits_list0, orbits_list1, rtol=1e-4, atol=1e-6):
"""Check that two orbits are equivalent.
Args:
        orbits_list0 (list or numpy.ndarray): a list of orbits, each a list of k-points.
        orbits_list1 (list or numpy.ndarray): a list of orbits, each a list of k-points.
rtol (float): the relative tolerance
atol (float): the absolute tolerance
Returns:
_ (bool): true if the two lists of orbits are equivalent
"""
def check_orbits(orbits_list0, orbits_list1):
"""Check that the orbits of one list of orbits are a subset of another
list of orbits.
Args:
            orbits_list0 (list or numpy.ndarray): a list of orbits, each a list of k-points.
            orbits_list1 (list or numpy.ndarray): a list of orbits, each a list of k-points.
"""
orbit_lists_equal = []
# Grab an orbit from the first list.
for orbit0 in orbits_list0:
orbit_equal = []
# Grab an orbit from the second list.
for orbit1 in orbits_list1:
orbit_equal.append([])
# See if all the k-points in the first orbit are in the second orbit.
for kpt in orbit0:
orbit_equal[-1].append(check_contained([kpt], orbit1, rtol=rtol, atol=atol))
                # An orbit is equivalent to another if all its k-points are in the other.
orbit_equal[-1] = all(orbit_equal[-1])
orbit_lists_equal.append(any(orbit_equal))
return all(orbit_lists_equal)
# If the two lists of orbits are subsets of each other, they are equivalent.
return all([check_orbits(orbits_list0, orbits_list1),
check_orbits(orbits_list1, orbits_list0)])
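# Illustrative sketch (not part of the original module): two lists containing the same
# orbits in a different order should compare as equivalent. The k-points below are made
# up for demonstration only.
def _example_equivalent_orbits():
    orbits_a = [[[0.0, 0.0, 0.0]], [[0.5, 0.0, 0.0], [0.0, 0.5, 0.0]]]
    orbits_b = [[[0.0, 0.5, 0.0], [0.5, 0.0, 0.0]], [[0.0, 0.0, 0.0]]]
    return equivalent_orbits(orbits_a, orbits_b)  # expected: True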
def gaussian_reduction(v1, v2, eps=1e-10):
"""Gaussian reduced two vectors by subtracting multiples of the shorter
vector from the longer. Repeat this process on both vectors until the
shortest set is obtained.
Args:
v1 (list or numpy.ndarray): a vector in three space in Cartesian
coordinates.
v2 (list or numpy.ndarray): a vector in three space in Cartesian
coordinates.
        eps (float): a finite precision tolerance used for comparing lengths
            of vectors.
Returns:
v1 (list or numpy.ndarray): a Gaussian reduced vector in three
space in Cartesian coordinates.
v2 (list or numpy.ndarray): a Gaussian reduced vector in three
space in Cartesian coordinates.
"""
# Make sure the norm of v1 is smaller than v2.
vecs = np.array([v1, v2])
v1,v2 = vecs[np.argsort(norm(vecs, axis=1))]
reduced = False
it = 0
while not reduced:
it += 1
if it > 10:
msg = "Failed to Gaussian reduce the vectors after {} iterations".format(it-1)
raise ValueError(msg)
# Subtract an integer multiple of v1 from v2.
v2 -= np.round(np.dot(v1, v2)/np.dot(v1, v1))*v1
# If v2 is still longer than v1, the vectors have been reduced.
if (norm(v1) - eps) < norm(v2):
reduced = True
# Make sure the norm of v1 is smaller than v2.
vecs = np.array([v1, v2])
v1,v2 = vecs[np.argsort(norm(vecs, axis=1))]
return v1, v2
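# Illustrative sketch (not part of the original module): Gaussian reduction of a pair of
# nearly parallel vectors. The inputs are made up; the expected result is the pair of
# unit vectors [1, 0, 0] and [0, 1, 0].
def _example_gaussian_reduction():
    v1 = [1.0, 0.0, 0.0]
    v2 = [4.0, 1.0, 0.0]   # long and nearly parallel to v1
    return gaussian_reduction(v1, v2)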
def reduce_lattice_vector(lattice_vectors, rtol=1e-4, atol=1e-6, eps=1e-10):
"""Make last lattice vector as short as possible while remaining in an
affine plane that passes through the end of the last lattice vector.
Args:
lattice_vectors (numpy.ndarray): the lattice generating vectors as columns
of a 3x3 array. The first two columns define a plane which is parallel
to the affine plane the third vector passes through. The third column
is the lattice vector being reduced.
rtol (float): a relative tolerance used when verifying the lattice vectors
are linearly independent and when verifying the input lattice vectors
are lattice points of the reduced lattice vectors.
atol (float): an absolute tolerance used when verifying the lattice vectors
are linearly independent and when verifying the input lattice vectors
are lattice points of the reduced lattice vectors.
        eps (float): a finite precision tolerance that is added to the norms of vectors
when comparing lengths and to a point converted into lattice coordinates
before finding a nearby lattice point.
Returns:
reduced_lattice_vectors (numpy.ndarray): the generating vectors with the two in
the first two columns unchanged and the third reduced.
"""
# Assign a variable for each of the lattice vectors.
v1,v2,v3 = lattice_vectors.T
# Gaussian reduce the first two lattice vectors, v1 and v2.
# After reduction, the lattice point closest to the projection of v3
# in v1-v2 plane is guaranteed to be one of the corners of the unit
# cell enclosing the projection of v3.
v1r,v2r = gaussian_reduction(v1, v2, eps)
# Replace the first two lattice vectors with the Gaussian reduced ones.
temp_lattice_vectors = np.array([v1r, v2r, v3]).T
# Verify the new basis is linearly independent.
if np.isclose(det(temp_lattice_vectors), 0, rtol=rtol, atol=atol):
msg = ("After Gaussian reduction of the first two lattice vectors, "
"the lattice generating vectors are linearly dependent.")
raise ValueError(msg)
# Find the point in the v1-v2 affine plane that is closest
# to the origin
# Find a vector orthogonal and normal to the v1-v2 plane
v_on = np.cross(v1r, v2r)/norm(np.cross(v1r, v2r))
# Find the point on the plane closest to the origin
closest_pt = v3 - v_on*np.dot(v_on, v3)
# Put this point in lattice coordinates and then round down to the nearest
# integer
closest_lat_pt = np.floor(np.dot(inv(temp_lattice_vectors),
closest_pt) + eps).astype(int)
# Make sure this point isn't parallel to the v1-v2 plane.
if not np.isclose(np.dot(closest_pt, v_on), 0, rtol=rtol, atol=atol):
msg = ("After Gaussian reduction, the latttice vectors are "
"linearly dependent.")
raise ValueError(msg)
# Find the four lattice points that enclose this point in lattice and Cartesian
# coordinates.
    corners_lat = np.array([list(i) + [0] for i in it.product([0,1], repeat=2)]) + (
closest_lat_pt)
corners_cart = np.dot(temp_lattice_vectors, corners_lat.T).T
# Calculate distances from the corners to `closest_pt`.
corner_distances = norm(corners_cart - closest_pt, axis=1)
# Find corner with the shortest distance.
corner_index = np.argmin(corner_distances)
# Calculate the reduced vector.
try:
v3r = v3 - corners_cart[corner_index]
except:
msg = "Failed to reduce the lattice vector."
raise ValueError(msg)
reduced_lattice_vectors = np.array([v1r, v2r, v3r]).T
# Verify that the old lattice vectors are an integer combination of the new
# lattice vectors.
check, N = check_commensurate(reduced_lattice_vectors, lattice_vectors,
rtol=rtol, atol=atol)
if not check:
msg = ("The reduced lattice generates a different lattice than the input"
" lattice.")
        raise ValueError(msg)
else:
return reduced_lattice_vectors
def check_minkowski_conditions(lattice_basis, eps=1e-10):
"""Verify a lattice basis satisfies the Minkowski conditions.
Args:
lattice_basis (numpy.ndarray): the lattice generating vectors as columns
of a 3x3 array.
        eps (float): a finite precision parameter that is added to the norm of vectors
when comparing lengths.
Returns:
minkowski_check (bool): A boolean whose value is `True` if the Minkowski
conditions are satisfied.
"""
minkowski_check = True
b1, b2, b3 = lattice_basis.T
if (norm(b2) + eps) < norm(b1):
print("Minkowski condition |b1| < |b2| failed.")
minkowski_check = False
if (norm(b3) + eps) < norm(b2):
print("Minkowski condition |b2| < |b3| failed.")
minkowski_check = False
if (norm(b1 + b2) + eps) < norm(b2):
print("Minkowski condition |b2| < |b1 + b2| failed.")
minkowski_check = False
if (norm(b1 - b2) + eps) < norm(b2):
print("Minkowski condition |b1 - b2| < |b2| failed.")
minkowski_check = False
if (norm(b1 + b3) + eps) < norm(b3):
print("Minkowski condition |b3| < |b1 + b3| failed.")
minkowski_check = False
if (norm(b3 - b1) + eps) < norm(b3):
print("Minkowski condition |b3 - b1| < |b3| failed. ")
minkowski_check = False
if (norm(b2 + b3) + eps) < norm(b3):
print("Minkowski condition |b3| < |b2 + b3| failed. ")
minkowski_check = False
if (norm(b3 - b2) + eps) < norm(b3):
print("Minkowski condition |b3| < |b3 - b2| failed.")
minkowski_check = False
if (norm(b1 + b2 + b3) + eps) < norm(b3):
print("Minkowski condition |b3| < |b1 + b2 + b3| failed.")
minkowski_check = False
if (norm(b1 - b2 + b3) + eps) < norm(b3):
print("Minkowski condition |b3| < |b1 - b2 + b3| failed.")
minkowski_check = False
if (norm(b1 + b2 - b3) + eps) < norm(b3):
print("Minkowski condition |b3| < |b1 + b2 - b3| failed.")
minkowski_check = False
if (norm(b1 - b2 - b3) + eps) < norm(b3):
print("Minkowski condition |b3| < |b1 - b2 - b3| failed.")
minkowski_check = False
return minkowski_check
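# Illustrative sketch (not part of the original module): an orthonormal basis such as the
# identity trivially satisfies every Minkowski condition. Assumes numpy is available as np.
def _example_check_minkowski_conditions():
    return check_minkowski_conditions(np.eye(3))  # expected: True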
def minkowski_reduce_basis(lattice_basis, rtol=1e-4, atol=1e-6, eps=1e-10):
"""Minkowski reduce the basis of a lattice.
Args:
lattice_basis (numpy.ndarray): the lattice generating vectors as columns
of a 3x3 array.
        rtol (float): a relative tolerance used when comparing determinants to zero, and
            used as an input to `reduce_lattice_vector`.
        atol (float): an absolute tolerance used when comparing determinants to zero, and
            used as an input to `reduce_lattice_vector`.
        eps (float): a finite precision tolerance that is added to the norms of vectors
when comparing lengths.
Returns:
lat_vecs (numpy.ndarray): the Minkowski reduced lattice vectors as columns
of a 3x3 array.
"""
if np.isclose(det(lattice_basis), 0, rtol=rtol, atol=atol):
msg = "Lattice basis is linearly dependent."
raise ValueError(msg)
limit = 10
lat_vecs = deepcopy(lattice_basis)
for _ in range(limit):
# Sort the lattice vectors by their norms in ascending order.
lat_vecs = lat_vecs.T[np.argsort(norm(lat_vecs, axis=0))].T
# Reduce the lattice vector in the last column.
lat_vecs = reduce_lattice_vector(lat_vecs, rtol=rtol, atol=atol, eps=eps)
if norm( lat_vecs[:,2] ) >= (norm( lat_vecs[:,1] ) - eps):
break
# Check that the Minkowski conditions are satisfied.
if not check_minkowski_conditions(lat_vecs, eps):
msg = "Failed to meet Minkowski reduction conditions after {} iterations".format(limit)
raise ValueError(msg)
# Sort the lattice vectors by their norms in ascending order.
lat_vecs = lat_vecs.T[np.argsort(norm(lat_vecs, axis=0))].T
# We want the determinant to be positive. Technically, this is no longer a
# Minkowski reduced basis but it shouldn't physically affect anything and the
# basis is still as orthogonal as possible.
if (det(lat_vecs) + eps) < 0:
lat_vecs = swap_rows_columns(lat_vecs, 1, 2, rows=False)
# lat_vecs[:, 1], lat_vecs[:, 2] = lat_vecs[:, 2], lat_vecs[:, 1].copy()
return lat_vecs
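# Illustrative sketch (not part of the original module): a deliberately skewed basis of
# the simple cubic lattice should reduce to (signed, permuted) unit vectors. Assumes
# numpy is available as np; the basis below is made up.
def _example_minkowski_reduce_basis():
    skewed = np.array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0],
                       [5.0, 3.0, 1.0]]).T  # columns are the lattice vectors
    return minkowski_reduce_basis(skewed)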
def check_commensurate(lattice, sublattice, rtol=1e-5, atol=1e-8):
"""Check if a lattice is commensurate with a sublattice.
Args:
lattice (numpy.ndarray): lattice generating vectors as columns of a 3x3 array.
sublattice (numpy.ndarray): sublattice generating vectors as columns of a 3x3
array.
Returns:
_ (bool): if the lattice and sublattice are commensurate, return `True`.
N (numpy.ndarray): if the lattice and sublattice are commensurate, return an array
of ints. Otherwise, return an array of floats.
"""
N = np.dot(inv(lattice), sublattice)
if np.allclose(N, np.round(N), atol=atol, rtol=rtol):
N = np.round(N).astype(int)
return True, N
else:
return False, N
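# Illustrative sketch (not part of the original module): a 2x2x2 supercell is commensurate
# with its parent lattice and the transformation matrix N is 2 times the identity. Assumes
# numpy is available as np.
def _example_check_commensurate():
    lattice = np.eye(3)
    sublattice = 2 * np.eye(3)
    return check_commensurate(lattice, sublattice)  # expected: (True, 2 * identity)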
def get_space_group_size(file_loc, coords="lat", rtol=1e-4, atol=1e-6, eps=1e-10):
"""Get the size of the point group.
Args:
file_loc (str): the location of the VASP POSCAR file.
Returns:
_ (int): the number of operations in the space group.
"""
data = read_poscar(file_loc)
lat_vecs = data["lattice vectors"]
atom_labels = data["atomic basis"]["atom labels"]
atom_positions = data["atomic basis"]["atom positions"]
    point_group, translations = get_space_group(lat_vecs, atom_labels, atom_positions, coords=coords,
rtol=rtol, atol=atol, eps=eps)
return len(point_group)
|
jerjorg/BZI
|
BZI/symmetry.py
|
Python
|
gpl-3.0
| 135,404
|
[
"ASE",
"CRYSTAL",
"Gaussian",
"VASP"
] |
8208d5017a7c97d2e20182e1ad3f0e9cd126ae7f8bc46fa7ff3ed29de0888034
|
#!/usr/bin/env python
#
# $File: defdict.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
pop = sim.Population([100]*2, loci=1)
sim.initGenotype(pop, freq=[0, 0.2, 0.8], subPops=0)
sim.initGenotype(pop, freq=[0.2, 0.8], subPops=1)
sim.stat(pop, alleleFreq=0, vars=['alleleFreq_sp'])
for sp in range(2):
print('Subpop %d (with %d alleles): ' % (sp, len(pop.dvars(sp).alleleFreq[0])))
for a in range(3):
print('%.2f ' % pop.dvars(sp).alleleFreq[0][a])
|
BoPeng/simuPOP
|
docs/defdict.py
|
Python
|
gpl-2.0
| 1,481
|
[
"VisIt"
] |
0be80283603b6ee76366e31ff41679d6417dc71ad6a8e8a20f1e7f693ed6691b
|
from neuron import h
from cell_template import Cell
import helper_functions as hf
class Mn(Cell): #### Inherits from Cell
"""Motoneuron"""
#### __init__ is gone and handled in Cell.
#### We can override __init__ completely, or do some of
#### our own initialization first, and then let Cell do its
#### thing, and then do a bit more ourselves with "super".
####
from Mn_geometry_output3 import shape_3D
def __init__(self):
self.d_lambda = 0.1
self.soma = []
self.dend = []
self.ndend = 0
super(Mn, self).__init__()
#
def create_sections(self):
self.soma = h.Section(name = 'soma', cell = self)
self.dend = [h.Section(name = 'dend_%d' % x, cell = self) for x in range(249)]
self.ndend = len(self.dend)
self.all.append(sec=self.soma)
for a in range(self.ndend):
self.all.append(sec=self.dend[a])
#
def build_topology(self):
"""Connect the sections of the cell to build a tree."""
# Happens in shape_3D for the motor neuron
def build_subsets(self):
"""Build subset lists. """
#
def define_geometry(self):
"""Set the 3D geometry of the cell."""
self.shape_3D()
#
def define_biophysics(self):
"""Assign the membrane properties across the cell."""
self.soma.insert("motoneuron")
for dend in self.dend:
dend.nseg = 11
dend.Ra = 200
dend.cm = 2
dend.insert("pas")
def create_synapses(self):
"""Add an exponentially decaying synapse in the middle
of the dendrite. Set its tau to 2ms, and append this
synapse to the synlist of the cell."""
syn = h.ExpSyn(self.dend[0](0.1))
syn.tau = 3
self.synlist.append(syn) # synlist is defined in Cell
syn = h.ExpSyn(self.dend[5](0.1))
syn.tau = 3
self.synlist.append(syn) # synlist is defined in Cell
syn = h.Exp2Syn(self.soma(0.3))
syn.tau1 = 3
syn.tau2 = 10
syn.e = -85
self.synlist.append(syn) # synlist is defined in Cell
syn = h.Exp2Syn(self.soma(0.5))
syn.tau1 = 3
syn.tau2 = 10
syn.e = -85
self.synlist.append(syn) # synlist is defined in Cell
|
penguinscontrol/Spinal-Cord-Modeling
|
Python/Mn_template.py
|
Python
|
gpl-2.0
| 2,319
|
[
"NEURON"
] |
4bbba36c39617e6720f6114f078c7036db9d14cf71e685d51d38e9ba3c4e7a7d
|
#!/usr/bin/python
#
# This example shows how to use the MITIE Python API to train a named_entity_extractor.
#
#
import sys, os
# Make sure you put the mitielib folder into the python search path. There are
# a lot of ways to do this, here we do it programmatically with the following
# two statements:
parent = os.path.dirname(os.path.realpath(__file__))
sys.path.append(parent + '/../../mitielib')
from mitie import *
# When you train a named_entity_extractor you need to get a dataset of sentences (or
# sentence or paragraph length chunks of text) where each sentence is annotated with the
# entities you want to find. For example, if we wanted to find all the names of people and
# organizations then we would need to get a bunch of sentences with examples of person
# names and organizations in them. Here is an example:
# My name is Davis King and I work for MIT.
# "Davis King" is a person name and "MIT" is an organization.
#
# You then give MITIE these example sentences with their entity annotations and it will
# learn to detect them. That is what we do below.
# So let's make the first training example. We use the sentence above. Note that the
# training API takes tokenized sentences. It is up to you how you tokenize them, you
# can use the default tokenizer that comes with MITIE or any other method you like.
sample = ner_training_instance(["My", "name", "is", "Davis", "King", "and", "I", "work", "for", "MIT", "."])
# Now that we have the tokens stored, we add the entity annotations. The first
# annotation indicates that the tokens in range(3,5) are a person name. I.e.
# "Davis King" is a person name. Note that you can use any strings as the
# labels. Here we use "person" and "org" but you could use any labels you
# like.
sample.add_entity(xrange(3,5), "person")
sample.add_entity(xrange(9,10), "org")
# And we add another training example
sample2 = ner_training_instance(["The", "other", "day", "at", "work", "I", "saw", "Brian", "Smith", "from", "CMU", "."])
sample2.add_entity(xrange(7,9), "person")
sample2.add_entity(xrange(10,11), "org")
# Now that we have some annotated example sentences we can create the object that does
# the actual training, the ner_trainer. The constructor for this object takes a string
# that should contain the file name for a saved mitie::total_word_feature_extractor.
# The total_word_feature_extractor is MITIE's primary method for analyzing words and
# is created by the tool in the MITIE/tools/wordrep folder. The wordrep tool analyzes
# a large document corpus, learns important word statistics, and then outputs a
# total_word_feature_extractor that is knowledgeable about a particular language (e.g.
# English). MITIE comes with a total_word_feature_extractor for English so that is
# what we use here. But if you need to make your own you do so using a command line
# statement like:
# wordrep -e a_folder_containing_only_text_files
# and wordrep will create a total_word_feature_extractor.dat based on the supplied
# text files. Note that wordrep can take a long time to run or require a lot of RAM
# if a large text dataset is given. So use a powerful machine and be patient.
trainer = ner_trainer("../../MITIE-models/english/total_word_feature_extractor.dat")
# Don't forget to add the training data. Here we have only two examples, but for real
# uses you need to have thousands.
trainer.add(sample)
trainer.add(sample2)
# The trainer can take advantage of a multi-core CPU. So set the number of threads
# equal to the number of processing cores for maximum training speed.
trainer.num_threads = 4
# This function does the work of training. Note that it can take a long time to run
# when using larger training datasets. So be patient.
ner = trainer.train()
# Now that training is done we can save the ner object to disk like so. This will
# allow you to load the model back in using a statement like:
# ner = named_entity_extractor("new_ner_model.dat").
ner.save_to_disk("new_ner_model.dat")
# But now let's try out the ner object. It was only trained on a small dataset but it
# has still learned a little. So let's give it a whirl. But first, print a list of
# possible tags. In this case, it is just "person" and "org".
print "tags:", ner.get_possible_ner_tags()
# Now let's make up a test sentence and ask the ner object to find the entities.
tokens = ["I", "met", "with", "John", "Becker", "at", "HBU", "."]
entities = ner.extract_entities(tokens)
# Happily, it found the correct answers, "John Becker" and "HBU" in this case which we
# print out below.
print "\nEntities found:", entities
print "\nNumber of entities detected:", len(entities)
for e in entities:
range = e[0]
tag = e[1]
entity_text = " ".join(tokens[i] for i in range)
print " " + tag + ": " + entity_text
|
siraj/plexydesk-1
|
deps/mitie/examples/python/train_ner.py
|
Python
|
lgpl-3.0
| 4,833
|
[
"Brian"
] |
5e39bb6fcef402e1901763f6c08a18b50ff40af86ae7222b76fa70528bd0fc8d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
The initial version of this module was based on a similar implementation
present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference wrt the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configuration and their expected efficiency).
"""
from __future__ import print_function, division, unicode_literals
import sys
import os
import abc
import string
import copy
import getpass
import six
import json
import math
from . import qutils as qu
from collections import namedtuple
from subprocess import Popen, PIPE
from pymatgen.util.io_utils import AtomicFile
from monty.string import is_string, list_strings
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.inspect import all_subclasses
from monty.io import FileLock
from monty.json import MSONable
from pymatgen.core.units import Memory
from .utils import Condition
from .launcher import ScriptEditor
from .qjobs import QueueJob
import logging
logger = logging.getLogger(__name__)
__all__ = [
"make_qadapter",
]
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
class SubmitResults(namedtuple("SubmitResult", "qid, out, err, process")):
"""
    Named tuple created by the concrete implementation of _submit_to_que to pass the results of the process of
    submitting the jobfile to the queue.
    qid: queue id of the submission
    out: stdout of the submission
    err: stderr of the submission
process: process object of the submission
"""
class MpiRunner(object):
"""
This object provides an abstraction for the mpirunner provided
    by the different MPI libraries. Its main task is handling the
different syntax and options supported by the different mpirunners.
"""
def __init__(self, name, type=None, options=""):
"""
Args:
name (str): Name of the mpirunner e.g. mpirun, mpiexec, srun ...
type: Type of the mpirunner (not used at present)
options (str): String with options passed to the mpi runner e.g. "--bind-to None"
"""
self.name = name if name else ""
self.type = None
self.options = str(options)
def string_to_run(self, qad, executable, stdin=None, stdout=None, stderr=None, exec_args=None):
"""
Build and return a string with the command required to launch `executable` with the qadapter `qad`.
        Args:
qad: Qadapter instance.
executable (str): Executable name or path
stdin (str): Name of the file to be used as standard input. None means no redirection.
stdout (str): Name of the file to be used as standard output. None means no redirection.
stderr (str): Name of the file to be used as standard error. None means no redirection.
exec_args: Optional list of strings with options passed to `executable`.
Return:
String with command to execute.
"""
stdin = "< " + stdin if stdin is not None else ""
stdout = "> " + stdout if stdout is not None else ""
stderr = "2> " + stderr if stderr is not None else ""
if exec_args:
executable = executable + " " + " ".join(list_strings(exec_args))
basename = os.path.basename(self.name)
if basename in ["mpirun", "mpiexec", "srun"]:
if self.type is None:
# $MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR
num_opt = "-n " + str(qad.mpi_procs)
cmd = " ".join([self.name, self.options, num_opt, executable, stdin, stdout, stderr])
else:
raise NotImplementedError("type %s is not supported!" % self.type)
elif basename == "runjob":
#runjob --ranks-per-node 2 --exp-env OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
#runjob -n 2 --exp-env=OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
# exe must be absolute path or relative to cwd.
bg_size, rpn = qad.bgsize_rankspernode()
#num_opt = "-n " + str(qad.mpi_procs)
num_opt = "--ranks-per-node " + str(rpn)
cmd = " ".join([self.name, self.options, num_opt, "--exp-env OMP_NUM_THREADS",
"--exe `which " + executable + "` ", stdin, stdout, stderr])
else:
if qad.mpi_procs != 1:
raise ValueError("Cannot use mpi_procs > when mpi_runner basename=%s" % basename)
cmd = " ".join([executable, stdin, stdout, stderr])
return cmd
#@property
#def has_mpirun(self):
# """True if we are running via mpirun, mpiexec ..."""
# return self.name in ("mpirun", "mpiexec", "srun", "runjob")
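# Illustrative sketch (not part of the original module): how MpiRunner.string_to_run
# assembles a launch command. The stand-in queue adapter below only provides the
# mpi_procs attribute that the "mpirun" branch needs; names and file names are made up.
def _example_mpi_runner():
    class _FakeQad(object):
        mpi_procs = 4
    runner = MpiRunner("mpirun")
    # Roughly: "mpirun  -n 4 abinit < run.files > run.log "
    return runner.string_to_run(_FakeQad(), "abinit", stdin="run.files", stdout="run.log")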
class OmpEnv(AttrDict):
"""
Dictionary with the OpenMP environment variables
see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
"""
_KEYS = [
"OMP_SCHEDULE",
"OMP_NUM_THREADS",
"OMP_DYNAMIC",
"OMP_PROC_BIND",
"OMP_NESTED",
"OMP_STACKSIZE",
"OMP_WAIT_POLICY",
"OMP_MAX_ACTIVE_LEVELS",
"OMP_THREAD_LIMIT",
"OMP_STACKSIZE",
"OMP_PROC_BIND",
]
@classmethod
def as_ompenv(cls, obj):
"""Convert an object into a OmpEnv"""
if isinstance(obj, cls): return obj
if obj is None: return cls()
return cls(**obj)
def __init__(self, *args, **kwargs):
"""
Constructor method inherited from dictionary:
        >>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == "1"
To create an instance from an INI file, use:
OmpEnv.from_file(filename)
"""
super(OmpEnv, self).__init__(*args, **kwargs)
err_msg = ""
for key, value in self.items():
self[key] = str(value)
if key not in self._KEYS:
err_msg += "unknown option %s\n" % key
if err_msg:
raise ValueError(err_msg)
def export_str(self):
"""Return a string with the bash statements needed to setup the OMP env."""
return "\n".join("export %s=%s" % (k, v) for k, v in self.items())
class Hardware(object):
"""
This object collects information on the hardware available in a given queue.
Basic definitions:
- A node refers to the physical box, i.e. cpu sockets with north/south switches connecting memory systems
and extension cards, e.g. disks, nics, and accelerators
- A cpu socket is the connector to these systems and the cpu cores
    - A cpu core is an independent computing unit with its own pipeline, logical units, and memory controller.
    Each cpu core will be able to service a number of cpu threads, each having an independent instruction stream
    but sharing the core's memory controller and other logical units.
"""
def __init__(self, **kwargs):
self.num_nodes = int(kwargs.pop("num_nodes"))
self.sockets_per_node = int(kwargs.pop("sockets_per_node"))
self.cores_per_socket = int(kwargs.pop("cores_per_socket"))
# Convert memory to megabytes.
m = str(kwargs.pop("mem_per_node"))
self.mem_per_node = int(Memory.from_string(m).to("Mb"))
if self.mem_per_node <= 0 or self.sockets_per_node <= 0 or self.cores_per_socket <= 0:
raise ValueError("invalid parameters: %s" % kwargs)
if kwargs:
raise ValueError("Found invalid keywords in the partition section:\n %s" % list(kwargs.keys()))
def __str__(self):
"""String representation."""
lines = []
app = lines.append
app(" num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
(self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))
return "\n".join(lines)
@property
def num_cores(self):
"""Total number of cores available"""
return self.cores_per_socket * self.sockets_per_node * self.num_nodes
@property
def cores_per_node(self):
"""Number of cores per node."""
return self.cores_per_socket * self.sockets_per_node
@property
def mem_per_core(self):
"""Memory available on a single node."""
return self.mem_per_node / self.cores_per_node
def can_use_omp_threads(self, omp_threads):
"""True if omp_threads fit in a node."""
return self.cores_per_node >= omp_threads
def divmod_node(self, mpi_procs, omp_threads):
"""Use divmod to compute (num_nodes, rest_cores)"""
return divmod(mpi_procs * omp_threads, self.cores_per_node)
def as_dict(self):
return {'num_nodes': self.num_nodes,
'sockets_per_node': self.sockets_per_node,
'cores_per_socket': self.cores_per_socket,
'mem_per_node': str(Memory(val=self.mem_per_node, unit='Mb'))}
@classmethod
def from_dict(cls, dd):
return cls(num_nodes=dd['num_nodes'],
sockets_per_node=dd['sockets_per_node'],
cores_per_socket=dd['cores_per_socket'],
mem_per_node=dd['mem_per_node'])
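# Illustrative sketch (not part of the original module): constructing a Hardware object
# with the same kind of values shown in QueueAdapter.autodoc. The numbers are made up;
# memory is stored internally in megabytes.
def _example_hardware():
    hw = Hardware(num_nodes=2, sockets_per_node=2, cores_per_socket=8,
                  mem_per_node="64 Gb")
    # 2 nodes * 2 sockets * 8 cores = 32 cores in total, 16 per node.
    return hw.num_cores, hw.cores_per_node, hw.mem_per_core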
class _ExcludeNodesFile(object):
"""
This file contains the list of nodes to be excluded.
Nodes are indexed by queue name.
"""
DIRPATH = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
FILEPATH = os.path.join(DIRPATH, "exclude_nodes.json")
def __init__(self):
if not os.path.exists(self.FILEPATH):
if not os.path.exists(self.DIRPATH): os.makedirs(self.DIRPATH)
with FileLock(self.FILEPATH):
with open(self.FILEPATH, "w") as fh:
json.dump({}, fh)
def read_nodes(self, qname):
with open(self.FILEPATH, "w") as fh:
return json.load(fh).get(qname, [])
def add_nodes(self, qname, nodes):
nodes = (nodes,) if not isinstance(nodes, (tuple, list)) else nodes
with FileLock(self.FILEPATH):
with AtomicFile(self.FILEPATH, mode="w+") as fh:
d = json.load(fh)
                if qname in d:
                    d[qname].extend(nodes)
                    d[qname] = list(set(d[qname]))
                else:
                    d[qname] = list(nodes)
json.dump(d, fh)
_EXCL_NODES_FILE = _ExcludeNodesFile()
def show_qparams(qtype, stream=sys.stdout):
"""Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
for cls in all_subclasses(QueueAdapter):
if cls.QTYPE == qtype: return stream.write(cls.QTEMPLATE)
raise ValueError("Cannot find class associated to qtype %s" % qtype)
def all_qtypes():
"""Return sorted list with all qtypes supported."""
return sorted([cls.QTYPE for cls in all_subclasses(QueueAdapter)])
def make_qadapter(**kwargs):
"""
Return the concrete :class:`QueueAdapter` class from a string.
Note that one can register a customized version with:
.. example::
from qadapters import SlurmAdapter
class MyAdapter(SlurmAdapter):
QTYPE = "myslurm"
# Add your customized code here
# Register your class.
SlurmAdapter.register(MyAdapter)
make_qadapter(qtype="myslurm", **kwargs)
.. warning::
MyAdapter should be pickleable, hence one should declare it
at the module level so that pickle can import it at run-time.
"""
# Get all known subclasses of QueueAdapter.
d = {c.QTYPE: c for c in all_subclasses(QueueAdapter)}
# Preventive copy before pop
kwargs = copy.deepcopy(kwargs)
qtype = kwargs["queue"].pop("qtype")
return d[qtype](**kwargs)
class QScriptTemplate(string.Template):
delimiter = '$$'
class QueueAdapterError(Exception):
"""Base Error class for exceptions raise by QueueAdapter."""
class MaxNumLaunchesError(QueueAdapterError):
"""Raised by `submit_to_queue` if we try to submit more than `max_num_launches` times."""
class QueueAdapter(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
The `QueueAdapter` is responsible for all interactions with a specific queue management system.
This includes handling all details of queue script format as well as queue submission and management.
This is the **abstract** base class defining the methods that must be implemented by the concrete classes.
Concrete classes should extend this class with implementations that work on specific queue systems.
.. note::
A `QueueAdapter` has a handler (:class:`QueueJob`) defined in qjobs.py that allows one
to contact the resource manager to get info about the status of the job.
Each concrete implementation of `QueueAdapter` should have a corresponding `QueueJob`.
"""
Error = QueueAdapterError
MaxNumLaunchesError = MaxNumLaunchesError
@classmethod
def all_qtypes(cls):
"""Return sorted list with all qtypes supported."""
return sorted([subcls.QTYPE for subcls in all_subclasses(cls)])
@classmethod
def autodoc(cls):
return """
# Dictionary with info on the hardware available on this queue.
hardware:
num_nodes: # Number of nodes available on this queue (integer, MANDATORY).
sockets_per_node: # Number of sockets per node (integer, MANDATORY).
cores_per_socket: # Number of cores per socket (integer, MANDATORY).
# The total number of cores available on this queue is
# `num_nodes * sockets_per_node * cores_per_socket`.
# Dictionary with the options used to prepare the environment before submitting the job
job:
setup: # List of commands (strings) executed before running (DEFAULT: empty)
omp_env: # Dictionary with OpenMP environment variables (DEFAULT: empty i.e. no OpenMP)
modules: # List of modules to be imported before running the code (DEFAULT: empty).
# NB: Error messages produced by module load are redirected to mods.err
shell_env: # Dictionary with shell environment variables.
mpi_runner: # MPI runner. Possible values in ["mpirun", "mpiexec", "srun", None]
# DEFAULT: None i.e. no mpirunner is used.
mpi_runner_options # String with optional options passed to the `mpi_runner` e.g. "--bind-to None"
    shell_runner:        # Used for running small sequential jobs on the front-end. Set it to None
                         # if mpirun or mpiexec are not available on the front-end. If not
                         # given, small sequential jobs are executed with `mpi_runner`.
shell_runner_options # Similar to mpi_runner_options but for the runner used on the front-end.
pre_run: # List of commands (strings) executed before the run (DEFAULT: empty)
post_run: # List of commands (strings) executed after the run (DEFAULT: empty)
# dictionary with the name of the queue and optional parameters
# used to build/customize the header of the submission script.
queue:
    qtype:               # String defining the qadapter type e.g. slurm, shell ...
qname: # Name of the submission queue (string, MANDATORY)
qparams: # Dictionary with values used to generate the header of the job script
# We use the *normalized* version of the options i.e dashes in the official name
# are replaced by underscores e.g. ``--mail-type`` becomes ``mail_type``
# See pymatgen.io.abinit.qadapters.py for the list of supported values.
# Use ``qverbatim`` to pass additional options that are not included in the template.
# dictionary with the constraints that must be fulfilled in order to run on this queue.
limits:
min_cores: # Minimum number of cores (integer, DEFAULT: 1)
max_cores: # Maximum number of cores (integer, MANDATORY). Hard limit to hint_cores:
# it's the limit beyond which the scheduler will not accept the job (MANDATORY).
hint_cores: # The limit used in the initial setup of jobs.
# Fix_Critical method may increase this number until max_cores is reached
min_mem_per_proc: # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
# (DEFAULT: hardware.mem_per_core)
max_mem_per_proc: # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
# (DEFAULT: hardware.mem_per_node)
timelimit: # Initial time-limit. Accepts time according to slurm-syntax i.e:
# "days-hours" or "days-hours:minutes" or "days-hours:minutes:seconds" or
# "minutes" or "minutes:seconds" or "hours:minutes:seconds",
timelimit_hard: # The hard time-limit for this queue. Same format as timelimit.
# Error handlers could try to submit jobs with increased timelimit
# up to timelimit_hard. If not specified, timelimit_hard == timelimit
condition: # MongoDB-like condition (DEFAULT: empty, i.e. not used)
allocation: # String defining the policy used to select the optimal number of CPUs.
# possible values are in ["nodes", "force_nodes", "shared"]
# "nodes" means that we should try to allocate entire nodes if possible.
# This is a soft limit, in the sense that the qadapter may use a configuration
# that does not fulfill this requirement. In case of failure, it will try to use the
# smallest number of nodes compatible with the optimal configuration.
# Use `force_nodes` to enfore entire nodes allocation.
# `shared` mode does not enforce any constraint (DEFAULT: shared).
max_num_launches: # Limit to the number of times a specific task can be restarted (integer, DEFAULT: 5)
"""
def __init__(self, **kwargs):
"""
Args:
qname: Name of the queue.
qparams: Dictionary with the parameters used in the template.
setup: String or list of commands to execute during the initial setup.
modules: String or list of modules to load before running the application.
shell_env: Dictionary with the environment variables to export before running the application.
omp_env: Dictionary with the OpenMP variables.
pre_run: String or list of commands to execute before launching the calculation.
post_run: String or list of commands to execute once the calculation is completed.
mpi_runner: Path to the MPI runner or :class:`MpiRunner` instance. None if not used
mpi_runner_options: Optional string with options passed to the mpi_runner.
max_num_launches: Maximum number of submissions that can be done for a specific task. Defaults to 5
            qverbatim: String with extra options added verbatim to the header of the submission script
                (use it for options that are not included in the template).
min_cores, max_cores, hint_cores: Minimum, maximum, and hint limits of number of cores that can be used
            min_mem_per_proc: Minimum memory per process in megabytes.
            max_mem_per_proc: Maximum memory per process in megabytes.
            timelimit: Initial time limit in seconds.
            timelimit_hard: Hard time limit for this queue, in seconds.
priority: Priority level, integer number > 0
condition: Condition object (dictionary)
.. note::
priority is a non-negative integer used to order the qadapters. The :class:`TaskManager` will
try to run jobs on the qadapter with the highest priority if possible
"""
# TODO
#task_classes
# Make defensive copies so that we can change the values at runtime.
kwargs = copy.deepcopy(kwargs)
self.priority = int(kwargs.pop("priority"))
self.hw = Hardware(**kwargs.pop("hardware"))
self._parse_queue(kwargs.pop("queue"))
self._parse_limits(kwargs.pop("limits"))
self._parse_job(kwargs.pop("job"))
self.set_master_mem_overhead(kwargs.pop("master_mem_overhead", 0))
# List of dictionaries with the parameters used to submit jobs
# The launcher will use this information to increase the resources
self.launches = []
if kwargs:
raise ValueError("Found unknown keywords:\n%s" % list(kwargs.keys()))
self.validate_qparams()
# Initialize some values from the info reported in the partition.
self.set_mpi_procs(self.min_cores)
self.set_mem_per_proc(self.min_mem_per_proc)
# Final consistency check.
self.validate_qparams()
def as_dict(self):
"""
        Provides a simple, though incomplete, dict serialization of the object
        (OpenMP settings are missing and not all limits are kept in the dictionary).
        Raise:
            `NotImplementedError` if OpenMP is activated.
"""
if self.has_omp:
raise NotImplementedError('as_dict method of QueueAdapter not yet implemented when OpenMP is activated')
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'priority': self.priority,
'hardware': self.hw.as_dict(),
'queue': {'qtype': self.QTYPE,
'qname': self._qname,
'qnodes': self.qnodes,
'qparams': self._qparams},
'limits': {'timelimit_hard': self._timelimit_hard,
'timelimit': self._timelimit,
'min_cores': self.min_cores,
'max_cores': self.max_cores,
'min_mem_per_proc': self.min_mem_per_proc,
'max_mem_per_proc': self.max_mem_per_proc,
'memory_policy': self.memory_policy
},
'job': {},
'mpi_procs': self._mpi_procs,
'mem_per_proc': self._mem_per_proc,
'master_mem_overhead': self._master_mem_overhead
}
@classmethod
def from_dict(cls, dd):
priority = dd.pop('priority')
hardware = dd.pop('hardware')
queue = dd.pop('queue')
limits = dd.pop('limits')
job = dd.pop('job')
qa = make_qadapter(priority=priority, hardware=hardware, queue=queue, limits=limits, job=job)
qa.set_mpi_procs(dd.pop('mpi_procs'))
qa.set_mem_per_proc(dd.pop('mem_per_proc'))
qa.set_master_mem_overhead(dd.pop('master_mem_overhead', 0))
timelimit = dd.pop('timelimit', None)
if timelimit is not None:
qa.set_timelimit(timelimit=timelimit)
dd.pop('@module', None)
dd.pop('@class', None)
if dd:
raise ValueError("Found unknown keywords:\n%s" % list(dd.keys()))
return qa
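    # Illustrative round trip (a sketch; it assumes OpenMP is not active, since
    # as_dict raises NotImplementedError in that case):
    #
    #     d = qad.as_dict()
    #     qad2 = QueueAdapter.from_dict(d)   # dispatches through make_qadapter
    #     assert qad2.qname == qad.qname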
def validate_qparams(self):
"""
Check if the keys specified by the user in qparams are supported.
Raise:
`ValueError` if errors.
"""
# No validation for ShellAdapter.
if isinstance(self, ShellAdapter): return
# Parse the template so that we know the list of supported options.
err_msg = ""
for param in self.qparams:
if param not in self.supported_qparams:
err_msg += "Unsupported QUEUE parameter name %s\n" % param
err_msg += "Supported parameters:\n"
for param_sup in self.supported_qparams:
err_msg += " %s \n" % param_sup
if err_msg:
raise ValueError(err_msg)
def _parse_limits(self, d):
# Time limits.
self.set_timelimit(qu.timelimit_parser(d.pop("timelimit")))
tl_hard = d.pop("timelimit_hard",None)
tl_hard = qu.timelimit_parser(tl_hard) if tl_hard is not None else self.timelimit
self.set_timelimit_hard(tl_hard)
# Cores
self.min_cores = int(d.pop("min_cores", 1))
self.max_cores = int(d.pop("max_cores"))
self.hint_cores = int(d.pop("hint_cores", self.max_cores))
self.memory_policy = d.pop("memory_policy", "mem")
if self.min_cores > self.max_cores:
raise ValueError("min_cores %s cannot be greater than max_cores %s" % (self.min_cores, self.max_cores))
# Memory
        # FIXME: Needed because autoparal 1 with paral_kgb 1 is not able to estimate memory
self.min_mem_per_proc = qu.any2mb(d.pop("min_mem_per_proc", self.hw.mem_per_core))
self.max_mem_per_proc = qu.any2mb(d.pop("max_mem_per_proc", self.hw.mem_per_node))
# Misc
self.max_num_launches = int(d.pop("max_num_launches", 5))
self.condition = Condition(d.pop("condition", {}))
self.allocation = d.pop("allocation", "shared")
if self.allocation not in ("nodes", "force_nodes", "shared"):
raise ValueError("Wrong value for `allocation` option")
if d:
raise ValueError("Found unknown keyword(s) in limits section:\n %s" % list(d.keys()))
def _parse_job(self, d):
setup = d.pop("setup", None)
if is_string(setup): setup = [setup]
self.setup = setup[:] if setup is not None else []
omp_env = d.pop("omp_env", None)
self.omp_env = omp_env.copy() if omp_env is not None else {}
modules = d.pop("modules", None)
if is_string(modules): modules = [modules]
self.modules = modules[:] if modules is not None else []
shell_env = d.pop("shell_env", None)
self.shell_env = shell_env.copy() if shell_env is not None else {}
mpi_options = d.pop("mpi_runner_options", "")
self.mpi_runner = d.pop("mpi_runner", None)
if not isinstance(self.mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(self.mpi_runner, options=mpi_options)
self.shell_runner = d.pop("shell_runner", None)
shell_runner_options = d.pop("shell_runner_options", "")
if self.shell_runner is not None:
self.shell_runner = MpiRunner(self.shell_runner, options=shell_runner_options)
pre_run = d.pop("pre_run", None)
if is_string(pre_run): pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
post_run = d.pop("post_run", None)
if is_string(post_run): post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
if d:
raise ValueError("Found unknown keyword(s) in job section:\n %s" % list(d.keys()))
def _parse_queue(self, d):
# Init params
qparams = d.pop("qparams", None)
self._qparams = copy.deepcopy(qparams) if qparams is not None else {}
self.set_qname(d.pop("qname", ""))
self.qnodes = d.pop("qnodes", "standard")
if self.qnodes not in ["standard", "shared", "exclusive"]:
raise ValueError("Nodes must be either in standard, shared or exclusive mode "
"while qnodes parameter was {}".format(self.qnodes))
if d:
raise ValueError("Found unknown keyword(s) in queue section:\n %s" % list(d.keys()))
def __str__(self):
lines = ["%s:%s" % (self.__class__.__name__, self.qname)]
app = lines.append
app("Hardware:\n" + str(self.hw))
#lines.extend(["qparams:\n", str(self.qparams)])
if self.has_omp: app(str(self.omp_env))
return "\n".join(lines)
@property
def qparams(self):
"""Dictionary with the parameters used to construct the header."""
return self._qparams
@lazy_property
def supported_qparams(self):
"""
        List of the supported parameters that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
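        For instance, the line ``#SBATCH --partition=$${partition}`` in the SLURM
        template yields the entry ``partition``.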
"""
import re
return re.findall(r"\$\$\{(\w+)\}", self.QTEMPLATE)
@property
def has_mpi(self):
"""True if we are using MPI"""
return bool(self.mpi_runner)
@property
def has_omp(self):
"""True if we are using OpenMP threads"""
return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))
@property
def num_cores(self):
"""Total number of cores employed"""
return self.mpi_procs * self.omp_threads
@property
def omp_threads(self):
"""Number of OpenMP threads."""
if self.has_omp:
return self.omp_env["OMP_NUM_THREADS"]
else:
return 1
@property
def pure_mpi(self):
"""True if only MPI is used."""
return self.has_mpi and not self.has_omp
@property
def pure_omp(self):
"""True if only OpenMP is used."""
return self.has_omp and not self.has_mpi
@property
def hybrid_mpi_omp(self):
"""True if we are running in MPI+Openmp mode."""
return self.has_omp and self.has_mpi
@property
def run_info(self):
"""String with info on the run."""
return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
def deepcopy(self):
"""Deep copy of the object."""
return copy.deepcopy(self)
def record_launch(self, queue_id): # retcode):
"""Save submission"""
self.launches.append(
AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
return len(self.launches)
def remove_launch(self, index):
"""Remove launch with the given index."""
self.launches.pop(index)
@property
def num_launches(self):
"""Number of submission tried with this adapter so far."""
return len(self.launches)
@property
def last_launch(self):
"""Return the last launch."""
if len(self.launches) > 0:
return self.launches[-1]
else:
return None
def validate(self):
"""Validate the parameters of the run. Raises self.Error if invalid parameters."""
errors = []
app = errors.append
if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")
if self.omp_threads > self.hw.cores_per_node:
app("omp_threads > hw.cores_per_node")
if self.mem_per_proc > self.hw.mem_per_node:
app("mem_mb >= self.hw.mem_per_node")
if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")
if self.priority <= 0:
app("priority must be > 0")
if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):
app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied")
if errors:
raise self.Error(str(self) + "\n".join(errors))
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self._mpi_procs
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to mpi_procs"""
self._mpi_procs = mpi_procs
@property
def qname(self):
"""The name of the queue."""
return self._qname
def set_qname(self, qname):
"""Set the name of the queue."""
self._qname = qname
    # TODO: this assumes only one wall time, i.e. the one in the manager file is the one always used.
    # We should use the standard walltime to start with but also allow to increase the walltime.
@property
def timelimit(self):
"""Returns the walltime in seconds."""
return self._timelimit
@property
def timelimit_hard(self):
"""Returns the walltime in seconds."""
return self._timelimit_hard
def set_timelimit(self, timelimit):
"""Set the start walltime in seconds, fix method may increase this one until timelimit_hard is reached."""
self._timelimit = timelimit
def set_timelimit_hard(self, timelimit_hard):
"""Set the maximal possible walltime in seconds."""
self._timelimit_hard = timelimit_hard
@property
def mem_per_proc(self):
"""The memory per process in megabytes."""
return self._mem_per_proc
@property
def master_mem_overhead(self):
"""The memory overhead for the master process in megabytes."""
return self._master_mem_overhead
def set_mem_per_proc(self, mem_mb):
"""
        Set the memory per process in megabytes.
"""
# Hack needed because abinit is still not able to estimate memory.
# COMMENTED by David.
# This is not needed anymore here because the "hack" is performed directly in select_qadapter/_use_qadpos_pconf
# methods of TaskManager. Moreover, this hack should be performed somewhere else (this part should be
# independent of abinit ... and if we want to have less memory than the average memory available per node, we
# have to allow it!)
#if mem_mb <= self.min_mem_per_proc: mem_mb = self.min_mem_per_proc
self._mem_per_proc = int(mem_mb)
def set_master_mem_overhead(self, mem_mb):
"""
Set the memory overhead for the master process in megabytes.
"""
if mem_mb < 0:
raise ValueError("Memory overhead for the master process should be >= 0")
self._master_mem_overhead = int(mem_mb)
@property
def total_mem(self):
"""Total memory required by the job in megabytes."""
return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "Mb")
@abc.abstractmethod
def cancel(self, job_id):
"""
Cancel the job.
Args:
job_id: Job identifier.
Returns:
Exit status.
"""
def can_run_pconf(self, pconf):
"""True if the qadapter in principle is able to run the :class:`ParalConf` pconf"""
if not self.hint_cores >= pconf.num_cores >= self.min_cores: return False
if not self.hw.can_use_omp_threads(self.omp_threads): return False
if pconf.mem_per_proc > self.hw.mem_per_node: return False
if self.allocation == "force_nodes" and pconf.num_cores % self.hw.cores_per_node != 0:
return False
return self.condition(pconf)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
"""
Returns (num_nodes, mpi_per_node)
Aggressive: When Open MPI thinks that it is in an exactly- or under-subscribed mode
(i.e., the number of running processes is equal to or less than the number of available processors),
MPI processes will automatically run in aggressive mode, meaning that they will never voluntarily give
up the processor to other processes. With some network transports, this means that Open MPI will spin
in tight loops attempting to make message passing progress, effectively causing other processes to not get
any CPU cycles (and therefore never make any progress)
"""
class Distrib(namedtuple("Distrib", "num_nodes mpi_per_node exact")):
pass
#@property
#def mem_per_node
# return self.mpi_per_node * mem_per_proc
#def set_nodes(self, nodes):
hw = self.hw
# TODO: Add check on user-memory
if mem_per_proc <= 0:
logger.warning("mem_per_proc <= 0")
mem_per_proc = hw.mem_per_core
if mem_per_proc > hw.mem_per_node:
raise self.Error(
"mem_per_proc > mem_per_node.\n Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
        # Try to use all the cores in the node.
num_nodes, rest_cores = hw.divmod_node(mpi_procs, omp_threads)
if num_nodes == 0 and mpi_procs * mem_per_proc <= hw.mem_per_node:
# One node is enough
return Distrib(num_nodes=1, mpi_per_node=mpi_procs, exact=True)
if num_nodes == 0: num_nodes = 2
mpi_per_node = mpi_procs // num_nodes
if mpi_per_node * mem_per_proc <= hw.mem_per_node and rest_cores == 0:
# Commensurate with nodes.
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=True)
#if mode == "block", "cyclic"
# Try first to pack MPI processors in a node as much as possible
mpi_per_node = int(hw.mem_per_node / mem_per_proc)
assert mpi_per_node != 0
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
print("exact --> false", num_nodes, mpi_per_node)
if mpi_per_node * omp_threads <= hw.cores_per_node and mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
if (mpi_procs * omp_threads) % mpi_per_node != 0:
# Have to reduce the number of MPI procs per node
for mpi_per_node in reversed(range(1, mpi_per_node)):
if mpi_per_node > hw.cores_per_node: continue
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node == 0 and mpi_per_node * mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
else:
raise self.Error("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
def optimize_params(self, qnodes=None):
"""
This method is called in get_subs_dict. Return a dict with parameters to be added to qparams
Subclasses may provide a specialized version.
"""
logger.debug("optimize_params of baseclass --> no optimization available!!!")
return {}
def get_subs_dict(self, qnodes=None):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
#d = self.qparams.copy()
d = self.qparams
d.update(self.optimize_params(qnodes=qnodes))
# clean null values
subs_dict = {k: v for k, v in d.items() if v is not None}
#print("subs_dict:", subs_dict)
return subs_dict
def _make_qheader(self, job_name, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
# get substitution dict for replacements into the template
subs_dict = self.get_subs_dict()
# Set job_name and the names for the stderr and stdout of the
# queue manager (note the use of the extensions .qout and .qerr
        # so that we can easily locate these files).
subs_dict['job_name'] = job_name.replace('/', '_')
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
qtemplate = QScriptTemplate(self.QTEMPLATE)
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
return '\n'.join(clean_template)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path,
stdin=None, stdout=None, stderr=None, exec_args=None):
"""
Returns a (multi-line) String representing the queue script, e.g. PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
job_name: Name of the job.
launch_dir: (str) The directory the job will be launched in.
executable: String with the name of the executable to be executed or list of commands
            qout_path: Path of the Queue manager output file.
qerr_path: Path of the Queue manager error file.
exec_args: List of arguments passed to executable (used only if executable is a string, default: empty)
"""
        # PbsPro does not accept job_names longer than 15 chars; truncate to 14 to be safe.
if len(job_name) > 14 and isinstance(self, PbsProAdapter):
job_name = job_name[:14]
# Construct the header for the Queue Manager.
qheader = self._make_qheader(job_name, qout_path, qerr_path)
# Add the bash section.
se = ScriptEditor()
# Cd to launch_dir immediately.
se.add_line("cd " + os.path.abspath(launch_dir))
if self.setup:
se.add_comment("Setup section")
se.add_lines(self.setup)
se.add_emptyline()
if self.modules:
# stderr is redirected to mods.err file.
# module load 2>> mods.err
se.add_comment("Load Modules")
se.add_line("module purge")
se.load_modules(self.modules)
se.add_emptyline()
se.add_comment("OpenMp Environment")
if self.has_omp:
se.declare_vars(self.omp_env)
se.add_emptyline()
else:
se.declare_vars({"OMP_NUM_THREADS": 1})
if self.shell_env:
se.add_comment("Shell Environment")
se.declare_vars(self.shell_env)
se.add_emptyline()
if self.pre_run:
se.add_comment("Commands before execution")
se.add_lines(self.pre_run)
se.add_emptyline()
# Construct the string to run the executable with MPI and mpi_procs.
if is_string(executable):
line = self.mpi_runner.string_to_run(self, executable,
stdin=stdin, stdout=stdout, stderr=stderr, exec_args=exec_args)
se.add_line(line)
else:
assert isinstance(executable, (list, tuple))
se.add_lines(executable)
if self.post_run:
se.add_emptyline()
se.add_comment("Commands after execution")
se.add_lines(self.post_run)
return qheader + se.get_script_str() + "\n"
def submit_to_queue(self, script_file):
"""
Public API: wraps the concrete implementation _submit_to_queue
Raises:
`self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches
`self.Error` if generic error
"""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
if self.num_launches == self.max_num_launches:
raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches))
# Call the concrete implementation.
s = self._submit_to_queue(script_file)
self.record_launch(s.qid)
if s.qid is None:
raise self.Error("Error in job submission with %s. file %s \n" %
(self.__class__.__name__, script_file) +
"The error response reads:\n %s \n " % s.err +
"The out response reads:\n %s \n" % s.out)
# Here we create a concrete instance of QueueJob
return QueueJob.from_qtype_and_id(self.QTYPE, s.qid, self.qname), s.process
@abc.abstractmethod
def _submit_to_queue(self, script_file):
"""
Submits the job to the queue, probably using subprocess or shutil
This method must be provided by the concrete classes and will be called by submit_to_queue
Args:
script_file: (str) name of the script file to use (String)
Returns:
queue_id, process
"""
def get_njobs_in_queue(self, username=None):
"""
        Returns the number of jobs in the queue, probably using subprocess or shutil to
        call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
            # there's a problem talking to the queue manager?
            err_msg = ('Error trying to get the number of jobs in the queue. ' +
                       'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
@abc.abstractmethod
def _get_njobs_in_queue(self, username):
"""
Concrete Subclasses must implement this method. Return (njobs, process)
"""
# Methods to fix problems
def add_exclude_nodes(self, nodes):
return _EXCL_NODES_FILE.add_nodes(self.qname, nodes)
def get_exclude_nodes(self):
return _EXCL_NODES_FILE.read_nodes(self.qname)
@abc.abstractmethod
def exclude_nodes(self, nodes):
"""Method to exclude nodes in the calculation. Return True if nodes have been excluded"""
def more_mem_per_proc(self, factor=1):
"""
Method to increase the amount of memory asked for, by factor.
        Return: the new memory in Mb if successful. Raises self.Error if the memory cannot be increased further.
"""
base_increase = 2000
old_mem = self.mem_per_proc
new_mem = old_mem + factor*base_increase
if new_mem < self.hw.mem_per_node:
self.set_mem_per_proc(new_mem)
return new_mem
raise self.Error('could not increase mem_per_proc further')
def more_master_mem_overhead(self, mem_increase_mb=1000):
"""
        Method to increase the memory overhead asked for the master node.
        Return: the new master memory overhead if successful. Raises self.Error if it cannot be increased further.
"""
old_master_mem_overhead = self.master_mem_overhead
new_master_mem_overhead = old_master_mem_overhead + mem_increase_mb
if new_master_mem_overhead + self.mem_per_proc < self.hw.mem_per_node:
self.set_master_mem_overhead(new_master_mem_overhead)
return new_master_mem_overhead
raise self.Error('could not increase master_mem_overhead further')
def more_cores(self, factor=1):
"""
        Method to increase the number of cores asked for (hint_cores), by factor.
        Return: the new number of cores if successful. Raises self.Error if max_cores has been reached.
"""
# TODO : find a formula that works for all max_cores
if self.max_cores > 40:
base_increase = 4 * int(self.max_cores / 40)
else:
base_increase = 4
new_cores = self.hint_cores + factor * base_increase
if new_cores < self.max_cores:
self.hint_cores = new_cores
return new_cores
raise self.Error('%s hint_cores reached limit on max_core %s' % (new_cores, self.max_cores))
def more_time(self, factor=1):
"""
        Method to increase the wall time. Raises self.Error if the hard limit has been reached.
"""
base_increase = int(self.timelimit_hard / 10)
new_time = self.timelimit + base_increase*factor
print('qadapter: trying to increase time')
if new_time < self.timelimit_hard:
self.set_timelimit(new_time)
print('new time set: ', new_time)
return new_time
self.priority = -1
raise self.Error("increasing time is not possible, the hard limit has been reached")
####################
# Concrete classes #
####################
class ShellAdapter(QueueAdapter):
"""Simple Adapter used to submit runs through the shell."""
QTYPE = "shell"
QTEMPLATE = """\
#!/bin/bash
$${qverbatim}
"""
def cancel(self, job_id):
return os.system("kill -9 %d" % job_id)
def _submit_to_queue(self, script_file):
# submit the job, return process and pid.
process = Popen(("/bin/bash", script_file), stderr=PIPE)
return SubmitResults(qid=process.pid, out='no out in shell submission', err='no err in shell submission', process=process)
def _get_njobs_in_queue(self, username):
return None, None
def exclude_nodes(self, nodes):
return False
class SlurmAdapter(QueueAdapter):
"""Adapter for SLURM."""
QTYPE = "slurm"
QTEMPLATE = """\
#!/bin/bash
#SBATCH --partition=$${partition}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --total_tasks=$${total_tasks}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#####SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --hint=$${hint}
#SBATCH --time=$${time}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --account=$${account}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --licenses=$${licenses}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(SlurmAdapter, self).set_qname(qname)
if qname:
self.qparams["partition"] = qname
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(SlurmAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["ntasks"] = mpi_procs
def set_omp_threads(self, omp_threads):
super(SlurmAdapter, self).set_omp_threads(omp_threads)
self.qparams["cpus_per_task"] = omp_threads
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(SlurmAdapter, self).set_mem_per_proc(mem_mb)
self.qparams["mem_per_cpu"] = self.mem_per_proc
# Remove mem if it's defined.
#self.qparams.pop("mem", None)
def set_timelimit(self, timelimit):
super(SlurmAdapter, self).set_timelimit(timelimit)
self.qparams["time"] = qu.time2slurm(timelimit)
def cancel(self, job_id):
return os.system("scancel %d" % job_id)
def optimize_params(self, qnodes=None):
params = {}
if self.allocation == "nodes":
# run on the smallest number of nodes compatible with the configuration
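            # Illustrative example (hypothetical numbers, true division assumed): with mpi_procs=40,
            # cores_per_node=16, total_mem=50000 Mb and mem_per_node=64000 Mb,
            # nodes = max(ceil(2.5), ceil(0.78)) = 3.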
params["nodes"] = max(int(math.ceil(self.mpi_procs / self.hw.cores_per_node)),
int(math.ceil(self.total_mem / self.hw.mem_per_node)))
return params
#dist = self.distribute(self.mpi_procs, self.omp_threads, self.mem_per_proc)
##print(dist)
#if False and dist.exact:
# # Can optimize parameters
# self.qparams["nodes"] = dist.num_nodes
# self.qparams.pop("ntasks", None)
# self.qparams["ntasks_per_node"] = dist.mpi_per_node
# self.qparams["cpus_per_task"] = self.omp_threads
# self.qparams["mem"] = dist.mpi_per_node * self.mem_per_proc
# self.qparams.pop("mem_per_cpu", None)
#else:
# # Delegate to slurm.
# self.qparams["ntasks"] = self.mpi_procs
# self.qparams.pop("nodes", None)
# self.qparams.pop("ntasks_per_node", None)
# self.qparams["cpus_per_task"] = self.omp_threads
# self.qparams["mem_per_cpu"] = self.mem_per_proc
# self.qparams.pop("mem", None)
#return {}
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if sys.version_info[0] < 3:
process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
# grab the returncode. SLURM returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
                # sbatch prints something like 'Submitted batch job 2561553' - grab the 4th token as the job id
queue_id = int(out.split()[3])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
logger.critical('Could not parse job id following slurm...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
try:
if 'exclude_nodes' not in self.qparams:
self.qparams.update({'exclude_nodes': 'node' + nodes[0]})
print('excluded node %s' % nodes[0])
for node in nodes[1:]:
self.qparams['exclude_nodes'] += ',node' + node
print('excluded node %s' % node)
return True
except (KeyError, IndexError):
raise self.Error('qadapter failed to exclude nodes')
def _get_njobs_in_queue(self, username):
if sys.version_info[0] < 3:
process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE,
universal_newlines=True)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result. lines should have this form:
# username
# count lines that include the username in it
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
class PbsProAdapter(QueueAdapter):
"""Adapter for PbsPro"""
QTYPE = "pbspro"
#PBS -l select=$${select}:ncpus=$${ncpus}:mem=$${mem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
#PBS -l select=$${select}:ncpus=1:mem=$${mem}mb:mpiprocs=1:ompthreads=$${ompthreads}
QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l select=$${select}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(PbsProAdapter, self).set_qname(qname)
if qname:
self.qparams["queue"] = qname
def set_timelimit(self, timelimit):
super(PbsProAdapter, self).set_timelimit(timelimit)
self.qparams["walltime"] = qu.time2pbspro(timelimit)
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(PbsProAdapter, self).set_mem_per_proc(mem_mb)
#self.qparams["mem"] = self.mem_per_proc
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def optimize_params(self, qnodes=None):
return {"select": self.get_select(qnodes=qnodes)}
def get_select(self, ret_dict=False, qnodes=None, memory_policy=None):
"""
Select is not the most intuitive command. For more info see:
* http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
* https://portal.ivec.org/docs/Supercomputers/PBS_Pro
"""
hw, mem_per_proc = self.hw, int(self.mem_per_proc)
#dist = self.distribute(self.mpi_procs, self.omp_threads, mem_per_proc)
"""
if self.pure_mpi:
num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
if num_nodes == 0:
logger.info("IN_CORE PURE MPI: %s" % self.run_info)
chunks = 1
ncpus = rest_cores
mpiprocs = rest_cores
mem = mem_per_proc * ncpus
ompthreads = 1
elif rest_cores == 0:
# Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
logger.info("PURE MPI run commensurate with cores_per_node %s" % self.run_info)
chunks = num_nodes
ncpus = hw.cores_per_node
mpiprocs = hw.cores_per_node
mem = ncpus * mem_per_proc
ompthreads = 1
else:
logger.info("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node): %s" % self.run_info)
chunks = self.mpi_procs
ncpus = 1
mpiprocs = 1
mem = mem_per_proc
ompthreads = 1
elif self.pure_omp:
# Pure OMP run.
logger.info("PURE OPENMP run: %s" % self.run_info)
assert hw.can_use_omp_threads(self.omp_threads)
chunks = 1
ncpus = self.omp_threads
mpiprocs = 1
mem = mem_per_proc
ompthreads = self.omp_threads
elif self.hybrid_mpi_omp:
assert hw.can_use_omp_threads(self.omp_threads)
num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
#print(num_nodes, rest_cores)
# TODO: test this
if rest_cores == 0 or num_nodes == 0:
logger.info("HYBRID MPI-OPENMP run, perfectly divisible among nodes: %s" % self.run_info)
chunks = max(num_nodes, 1)
mpiprocs = self.mpi_procs // chunks
chunks = chunks
ncpus = mpiprocs * self.omp_threads
mpiprocs = mpiprocs
mem = mpiprocs * mem_per_proc
ompthreads = self.omp_threads
else:
logger.info("HYBRID MPI-OPENMP, NOT commensurate with nodes: %s" % self.run_info)
chunks=self.mpi_procs
ncpus=self.omp_threads
mpiprocs=1
mem= mem_per_proc
ompthreads=self.omp_threads
else:
raise RuntimeError("You should not be here")
"""
if memory_policy is None:
memory_policy = self.memory_policy
if qnodes is None:
qnodes = self.qnodes
else:
if qnodes not in ["standard", "shared", "exclusive"]:
raise ValueError("Nodes must be either in standard, shared or exclusive mode "
"while qnodes parameter was {}".format(self.qnodes))
if qnodes == "standard":
return self._get_select_standard(ret_dict=ret_dict, memory_policy=memory_policy)
else:
return self._get_select_with_master_mem_overhead(ret_dict=ret_dict, qnodes=qnodes,
memory_policy=memory_policy)
def _get_select_with_master_mem_overhead(self, ret_dict=False, qnodes=None, memory_policy='mem'):
if self.has_omp:
raise NotImplementedError("select with master mem overhead not yet implemented with has_omp")
if qnodes is None:
qnodes = self.qnodes
else:
if qnodes not in ["standard", "shared", "exclusive"]:
raise ValueError("Nodes must be either in standard, shared or exclusive mode "
"while qnodes parameter was {}".format(self.qnodes))
if qnodes == "exclusive":
return self._get_select_with_master_mem_overhead_exclusive(ret_dict=ret_dict, memory_policy=memory_policy)
elif qnodes == "shared":
return self._get_select_with_master_mem_overhead_shared(ret_dict=ret_dict, memory_policy=memory_policy)
else:
raise ValueError("Wrong value of qnodes parameter : {}".format(self.qnodes))
def _get_select_with_master_mem_overhead_shared(self, ret_dict=False, memory_policy='mem'):
chunk_master, ncpus_master, vmem_master, mpiprocs_master = 1, 1, self.mem_per_proc+self.master_mem_overhead, 1
if self.mpi_procs > 1:
chunks_slaves, ncpus_slaves, vmem_slaves, mpiprocs_slaves = self.mpi_procs - 1, 1, self.mem_per_proc, 1
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
chunks_slaves=chunks_slaves, ncpus_slaves=ncpus_slaves,
mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves))
if memory_policy == 'vmem':
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_slaves}:vmem={vmem_slaves}mb:" \
"mpiprocs={mpiprocs_slaves}".format(**select_params)
elif memory_policy == 'mem':
s = "{chunk_master}:ncpus={ncpus_master}:mem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_slaves}:mem={vmem_slaves}mb:" \
"mpiprocs={mpiprocs_slaves}".format(**select_params)
tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_slaves
if tot_ncpus != self.mpi_procs:
raise ValueError('Total number of cpus is different from mpi_procs ...')
else:
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master))
if memory_policy == 'vmem':
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:" \
"mpiprocs={mpiprocs_master}".format(**select_params)
elif memory_policy == 'mem':
s = "{chunk_master}:ncpus={ncpus_master}:mem={vmem_master}mb:" \
"mpiprocs={mpiprocs_master}".format(**select_params)
if ret_dict:
return s, select_params
return s
def _get_select_with_master_mem_overhead_exclusive(self, ret_dict=False, memory_policy='mem'):
max_ncpus_master = min(self.hw.cores_per_node,
int((self.hw.mem_per_node-self.mem_per_proc-self.master_mem_overhead)
/ self.mem_per_proc) + 1)
if max_ncpus_master >= self.mpi_procs:
chunk, ncpus, mem, mpiprocs = 1, self.mpi_procs, self.hw.mem_per_node, self.mpi_procs
if memory_policy == 'vmem':
select_params = AttrDict(chunks=chunk, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(mem))
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
elif memory_policy == 'mem':
select_params = AttrDict(chunks=chunk, ncpus=ncpus, mpiprocs=mpiprocs, mem=int(mem))
s = "{chunks}:ncpus={ncpus}:mem={mem}mb:mpiprocs={mpiprocs}".format(**select_params)
tot_ncpus = chunk*ncpus
else:
ncpus_left = self.mpi_procs-max_ncpus_master
max_ncpus_per_slave_node = min(self.hw.cores_per_node, int(self.hw.mem_per_node/self.mem_per_proc))
nslaves_float = float(ncpus_left)/float(max_ncpus_per_slave_node)
ncpus_per_slave = max_ncpus_per_slave_node
mpiprocs_slaves = max_ncpus_per_slave_node
chunk_master = 1
mem_slaves = self.hw.mem_per_node
explicit_last_slave = False
chunk_last_slave, ncpus_last_slave, mem_last_slave, mpiprocs_last_slave = None, None, None, None
if nslaves_float > int(nslaves_float):
chunks_slaves = int(nslaves_float) + 1
pot_ncpus_all_slaves = chunks_slaves*ncpus_per_slave
if pot_ncpus_all_slaves >= self.mpi_procs-1:
explicit_last_slave = True
chunks_slaves = chunks_slaves-1
chunk_last_slave = 1
ncpus_master = 1
ncpus_last_slave = self.mpi_procs - 1 - chunks_slaves*ncpus_per_slave
mem_last_slave = self.hw.mem_per_node
mpiprocs_last_slave = ncpus_last_slave
else:
ncpus_master = self.mpi_procs-pot_ncpus_all_slaves
if ncpus_master > max_ncpus_master:
                        raise ValueError('ncpus for the master node exceeds the maximum ncpus for the master ... this '
                                         'should not happen ...')
if ncpus_master < 1:
raise ValueError('ncpus for the master node is 0 ... this should not happen ...')
elif nslaves_float == int(nslaves_float):
chunks_slaves = int(nslaves_float)
ncpus_master = max_ncpus_master
else:
raise ValueError('nslaves_float < int(nslaves_float) ...')
mem_master, mpiprocs_master = self.hw.mem_per_node, ncpus_master
if explicit_last_slave:
if memory_policy == 'vmem':
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(mem_master),
chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(mem_slaves),
chunk_last_slave=chunk_last_slave, ncpus_last_slave=ncpus_last_slave,
vmem_last_slave=int(mem_last_slave),
mpiprocs_last_slave=mpiprocs_last_slave)
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:mpiprocs={mpiprocs_slaves}+" \
"{chunk_last_slave}:ncpus={ncpus_last_slave}:vmem={vmem_last_slave}mb:" \
"mpiprocs={mpiprocs_last_slave}".format(**select_params)
elif memory_policy == 'mem':
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, mem_master=int(mem_master),
chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
mpiprocs_slaves=mpiprocs_slaves, mem_slaves=int(mem_slaves),
chunk_last_slave=chunk_last_slave, ncpus_last_slave=ncpus_last_slave,
mem_last_slave=int(mem_last_slave),
mpiprocs_last_slave=mpiprocs_last_slave)
s = "{chunk_master}:ncpus={ncpus_master}:mem={mem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_per_slave}:mem={mem_slaves}mb:mpiprocs={mpiprocs_slaves}+" \
"{chunk_last_slave}:ncpus={ncpus_last_slave}:mem={mem_last_slave}mb:" \
"mpiprocs={mpiprocs_last_slave}".format(**select_params)
tot_ncpus = chunk_master*ncpus_master+chunks_slaves*ncpus_per_slave+chunk_last_slave*ncpus_last_slave
else:
if memory_policy == 'vmem':
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(mem_master),
chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(mem_slaves))
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:" \
"mpiprocs={mpiprocs_slaves}".format(**select_params)
elif memory_policy == 'mem':
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, mem_master=int(mem_master),
chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
mpiprocs_slaves=mpiprocs_slaves, mem_slaves=int(mem_slaves))
s = "{chunk_master}:ncpus={ncpus_master}:mem={mem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_per_slave}:mem={mem_slaves}mb:" \
"mpiprocs={mpiprocs_slaves}".format(**select_params)
tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_per_slave
if tot_ncpus != self.mpi_procs:
raise ValueError('Total number of cpus is different from mpi_procs ...')
if ret_dict:
return s, select_params
return s
def _get_select_standard(self, ret_dict=False, memory_policy='mem'):
if not self.has_omp:
chunks, ncpus, mem, mpiprocs = self.mpi_procs, 1, self.mem_per_proc, 1
if memory_policy == 'vmem':
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(mem))
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
elif memory_policy == 'mem':
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, mem=int(mem))
s = "{chunks}:ncpus={ncpus}:mem={mem}mb:mpiprocs={mpiprocs}".format(**select_params)
else:
chunks, ncpus, mem, mpiprocs, ompthreads = self.mpi_procs, self.omp_threads, self.mem_per_proc, 1, self.omp_threads
if memory_policy == 'vmem':
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(mem),
ompthreads=ompthreads)
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(**select_params)
elif memory_policy == 'mem':
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, mem=int(mem),
ompthreads=ompthreads)
s = "{chunks}:ncpus={ncpus}:mem={mem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(
**select_params)
if ret_dict:
return s, select_params
return s
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if sys.version_info[0] < 3:
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
# grab the return code. PBS returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
                # output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
queue_id = int(out.split('.')[0])
except:
# probably error parsing job code
logger.critical("Could not parse job id following qsub...")
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
if sys.version_info[0] < 3:
process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should have this form
# '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = out.split('\n')
njobs = len([line.split() for line in outs if username in line])
return njobs, process
def exclude_nodes(self, nodes):
return False
class TorqueAdapter(PbsProAdapter):
"""Adapter for Torque."""
QTYPE = "torque"
QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
QueueAdapter.set_mem_per_proc(self, mem_mb)
#self.qparams["mem"] = self.mem_per_proc
#@property
#def mpi_procs(self):
# """Number of MPI processes."""
# return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
QueueAdapter.set_mpi_procs(self, mpi_procs)
self.qparams["nodes"] = 1
self.qparams["ppn"] = mpi_procs
def exclude_nodes(self, nodes):
raise self.Error('qadapter failed to exclude nodes, not implemented yet in torque')
class SGEAdapter(QueueAdapter):
"""
Adapter for Sun Grid Engine (SGE) task submission software.
See also:
* https://www.wiki.ed.ac.uk/display/EaStCHEMresearchwiki/How+to+write+a+SGE+job+submission+script
* http://www.uibk.ac.at/zid/systeme/hpc-systeme/common/tutorials/sge-howto.html
"""
QTYPE = "sge"
QTEMPLATE = """\
#!/bin/bash
#$ -account_name $${account_name}
#$ -N $${job_name}
#$ -q $${queue_name}
#$ -pe $${parallel_environment} $${ncpus}
#$ -l h_rt=$${walltime}
# request a per slot memory limit of size bytes.
##$ -l h_vmem=$${mem_per_slot}
##$ -l mf=$${mem_per_slot}
###$ -j no
#$ -M $${mail_user}
#$ -m $${mail_type}
# Submission environment
##$ -S /bin/bash
###$ -cwd # Change to current working directory
###$ -V # Export environment variables into script
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(SGEAdapter, self).set_qname(qname)
if qname:
self.qparams["queue_name"] = qname
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(SGEAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["ncpus"] = mpi_procs
def set_omp_threads(self, omp_threads):
super(SGEAdapter, self).set_omp_threads(omp_threads)
logger.warning("Cannot use omp_threads with SGE")
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(SGEAdapter, self).set_mem_per_proc(mem_mb)
self.qparams["mem_per_slot"] = str(int(self.mem_per_proc)) + "M"
def set_timelimit(self, timelimit):
super(SGEAdapter, self).set_timelimit(timelimit)
# Same convention as pbspro e.g. [hours:minutes:]seconds
self.qparams["walltime"] = qu.time2pbspro(timelimit)
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if sys.version_info[0] < 3:
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
# grab the returncode. SGE returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
                # output should be of the form
# Your job 1659048 ("NAME_OF_JOB") has been submitted
queue_id = int(out.split(' ')[2])
except:
# probably error parsing job code
logger.critical("Could not parse job id following qsub...")
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
"""Method to exclude nodes in the calculation"""
raise self.Error('qadapter failed to exclude nodes, not implemented yet in sge')
def _get_njobs_in_queue(self, username):
if sys.version_info[0] < 3:
process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should contain username
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
class MOABAdapter(QueueAdapter):
"""Adapter for MOAB. See https://computing.llnl.gov/tutorials/moab/"""
QTYPE = "moab"
QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
$${qverbatim}
"""
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(MOABAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["procs"] = mpi_procs
def set_timelimit(self, timelimit):
super(MOABAdapter, self).set_timelimit(timelimit)
self.qparams["walltime"] = qu.time2slurm(timelimit)
def set_mem_per_proc(self, mem_mb):
super(MOABAdapter, self).set_mem_per_proc(mem_mb)
#TODO
#raise NotImplementedError("set_mem_per_cpu")
def exclude_nodes(self, nodes):
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in moab')
def cancel(self, job_id):
return os.system("canceljob %d" % job_id)
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if sys.version_info[0] < 3:
process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
queue_id = None
if process.returncode == 0:
# grab the returncode. MOAB returns 0 if the job was successful
try:
# output should be the queue_id
queue_id = int(out.split()[0])
except:
# probably error parsing job code
logger.critical('Could not parse job id following msub...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
if sys.version_info[0] < 3:
            process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should have this form:
##
## active jobs: N eligible jobs: M blocked jobs: P
##
## Total job: 1
##
# Split the output string and return the last element.
out = out.splitlines()[-1]
njobs = int(out.split()[-1])
return njobs, process
class BlueGeneAdapter(QueueAdapter):
"""
    Adapter for LoadLeveler on BlueGene architectures.
See:
http://www.prace-ri.eu/best-practice-guide-blue-gene-q-html/#id-1.5.4.8
https://www.lrz.de/services/compute/supermuc/loadleveler/
"""
QTYPE = "bluegene"
QTEMPLATE = """\
#!/bin/bash
# @ job_name = $${job_name}
# @ class = $${class}
# @ error = $${_qout_path}
# @ output = $${_qerr_path}
# @ wall_clock_limit = $${wall_clock_limit}
# @ notification = $${notification}
# @ notify_user = $${mail_user}
# @ environment = $${environment}
# @ account_no = $${account_no}
# @ job_type = bluegene
# @ bg_connectivity = $${bg_connectivity}
# @ bg_size = $${bg_size}
$${qverbatim}
# @ queue
"""
def set_qname(self, qname):
super(BlueGeneAdapter, self).set_qname(qname)
if qname:
self.qparams["class"] = qname
#def set_mpi_procs(self, mpi_procs):
# """Set the number of CPUs used for MPI."""
# super(BlueGeneAdapter, self).set_mpi_procs(mpi_procs)
# #self.qparams["ntasks"] = mpi_procs
#def set_omp_threads(self, omp_threads):
# super(BlueGeneAdapter, self).set_omp_threads(omp_threads)
# #self.qparams["cpus_per_task"] = omp_threads
#def set_mem_per_proc(self, mem_mb):
# """Set the memory per process in megabytes"""
# super(BlueGeneAdapter, self).set_mem_per_proc(mem_mb)
# #self.qparams["mem_per_cpu"] = self.mem_per_proc
def set_timelimit(self, timelimit):
"""Limits are specified with the format hh:mm:ss (hours:minutes:seconds)"""
super(BlueGeneAdapter, self).set_timelimit(timelimit)
self.qparams["wall_clock_limit"] = qu.time2loadlever(timelimit)
def cancel(self, job_id):
return os.system("llcancel %d" % job_id)
def bgsize_rankspernode(self):
"""Return (bg_size, ranks_per_node) from mpi_procs and omp_threads."""
bg_size = int(math.ceil((self.mpi_procs * self.omp_threads)/ self.hw.cores_per_node))
bg_size = max(bg_size, 32) # TODO hardcoded
ranks_per_node = int(math.ceil(self.mpi_procs / bg_size))
return bg_size, ranks_per_node
def optimize_params(self, qnodes=None):
params = {}
bg_size, rpn = self.bgsize_rankspernode()
print("in optimize params")
print("mpi_procs:", self.mpi_procs, "omp_threads:",self.omp_threads)
print("bg_size:",bg_size,"ranks_per_node",rpn)
return {"bg_size": bg_size}
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if sys.version_info[0] < 3:
process = Popen(['llsubmit', script_file], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['llsubmit', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
# grab the return code. llsubmit returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
                # on JUQUEEN, output should be of the form
#llsubmit: The job "juqueen1c1.zam.kfa-juelich.de.281506" has been submitted.
token = out.split()[3]
s = token.split(".")[-1].replace('"', "")
queue_id = int(s)
except:
# probably error parsing job code
logger.critical("Could not parse job id following llsubmit...")
raise
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
if sys.version_info[0] < 3:
process = Popen(['llq', '-u', username], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['llq', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result. lines should have this form:
#
# Id Owner Submitted ST PRI Class Running On
# ------------------------ ---------- ----------- -- --- ------------ -----------
# juqueen1c1.281508.0 paj15530 1/23 13:20 I 50 n001
# 1 job step(s) in query, 1 waiting, 0 pending, 0 running, 0 held, 0 preempted
#
# count lines that include the username in it
outs = out.split('\n')
njobs = len([line.split() for line in outs if username in line])
return njobs, process
def exclude_nodes(self, nodes):
return False
|
johnson1228/pymatgen
|
pymatgen/io/abinit/qadapters.py
|
Python
|
mit
| 87,373
|
[
"ABINIT",
"pymatgen"
] |
1437b0f82bdfde33f4bb87d505e7ba21cd3109bc87ea764a6e2561493e07de2c
|